import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
        self.assertIsInstance(self.r, qt.Cost, 'Type should be Cost')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
        self.assertEqual(self.r['buy_fix'], 0.0, 'item value is incorrect')
        self.assertEqual(self.r['sell_fix'], 0.0, 'item value is incorrect')
        self.assertEqual(self.r['buy_rate'], 0.003, 'item value is incorrect')
        self.assertEqual(self.r['sell_rate'], 0.001, 'item value is incorrect')
        self.assertEqual(self.r['buy_min'], 5., 'item value is incorrect')
        self.assertEqual(self.r['sell_min'], 0.0, 'item value is incorrect')
        self.assertEqual(self.r['slipage'], 0.0, 'item value is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
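    # A hedged worked example of the arithmetic behind the expected values above,
    # assuming get_selling_result() returns (amounts_sold, cash_gained, fee) and
    # that the proportional fee is proceeds * sell_rate; this is a sketch, not the
    # library's own implementation.
    def test_rate_fee_arithmetic_sketch(self):
        """Re-derive the moq = 0 selling figures by hand"""
        proceeds = 3333.3333 * 10.      # shares sold * price
        fee = proceeds * 0.001          # 0.1% selling rate
        self.assertAlmostEqual(fee, 33.333333, places=5)
        self.assertAlmostEqual(proceeds - fee, 33299.999667, places=5)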
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
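    # Hedged sketch: the expected values above are consistent with the fee being
    # floored at the minimum, i.e. fee = max(value * rate, min_fee); here the rate
    # is zero, so the 300 floor always applies. This re-derives the moq = 0 case.
    def test_min_fee_arithmetic_sketch(self):
        proceeds = 3333.3333 * 10.
        fee = max(proceeds * 0., 300.)
        self.assertAlmostEqual(proceeds - fee, 33033.333, places=3)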
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
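    # Hedged sketch of the purchase arithmetic implied by the moq = 0 case above:
    # shares = cash / (price * (1 + buy_rate)) and fee = cash - shares * price.
    # The resulting fee (~301.39) exceeds the 300 minimum, so the proportional
    # rate, not the floor, determines the cost.
    def test_rate_with_min_arithmetic_sketch(self):
        shares = 20000. / (20. * (1. + 0.0153))
        fee = 20000. - shares * 20.
        self.assertAlmostEqual(shares, 984.9305624, places=6)
        self.assertAlmostEqual(fee, 301.3887520929774, places=6)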
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
        test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
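    # Hedged sketch: the expected figures above imply that a positive slipage can
    # only add to the effective purchase cost; this compares the fee with and
    # without slippage using the same API calls exercised above.
    def test_slipage_increases_cost_sketch(self):
        self.r.buy_fix = 0.
        self.r.buy_rate = 0.003
        self.r.buy_min = 0.
        self.r.slipage = 0.
        fee_no_slip = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)[2]
        self.r.slipage = 1E-9
        fee_slip = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)[2]
        self.assertGreater(fee_slip, fee_no_slip)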
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
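    # Hedged sketch of the type-inference rule the assertions above exercise:
    # a 2-tuple of ints -> 'discr', a 2-tuple containing a float -> 'conti',
    # and any longer tuple -> 'enum'.
    def test_type_inference_sketch(self):
        self.assertEqual(Space(pars=[(0, 10)]).types, ['discr'])
        self.assertEqual(Space(pars=[(0., 10)]).types, ['conti'])
        self.assertEqual(Space(pars=[(1, 2, 3)]).types, ['enum'])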
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
        expected = [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]
        for ext_item, exp_item in zip(extracted_int_list4, expected):
            print(ext_item, exp_item)
            self.assertEqual(tuple(ext_item), exp_item)
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
        for item, item2 in extracted_int_list5:
            print(item, item2)
            self.assertIn(item, pars_list[0])
            self.assertIn(item2, (1, 2, 3, 4))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all points are extracted; build subspaces around ten of them and check that
        # each subspace is a Space contained in s, then extract with 32 intervals and
        # verify that the number of generated points is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
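    # Hedged sketch: the 'interval' extraction counts above follow from stepping
    # through each axis and multiplying the per-axis point counts, e.g. a (0, 10)
    # discr axis stepped by 3 yields the 4 points 0, 3, 6, 9.
    def test_extract_count_sketch(self):
        per_axis = len(range(0, 11, 3))         # -> 4
        self.assertEqual(per_axis ** 2, 16)     # two such axes -> 16 points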
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
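    # Hedged sketch: the subspace boes above are consistent with clipping
    # [p - d, p + d] to the parent axis bounds on each dimension, e.g. point 15
    # with distance 10 on a (10., 250.) axis yields (10., 25.).
    def test_from_point_clipping_sketch(self):
        low, high, p, d = 10., 250., 15., 10.
        self.assertAlmostEqual(max(low, p - d), 10.)
        self.assertAlmostEqual(min(high, p + d), 25.)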
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
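    # Hedged sketch: cp1's closing_value is consistent with compounding each amount
    # at ir = 0.1 per year up to the last investment date:
    # 20000 * 1.1 ** 2 + 10000 = 34200.
    def test_closing_value_sketch(self):
        self.assertAlmostEqual(20000 * 1.1 ** 2 + 10000, 34200)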
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
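    # Hedged sketch of the cut() behaviour exercised above: the pool is trimmed to
    # its capacity, keeping the items with the largest perf values by default (the
    # smallest when keep_largest=False), stored in ascending perf order.
    def test_cut_sketch(self):
        p = ResultPool(2)
        for item, perf in zip(['a', 'b', 'c'], [3, 1, 2]):
            p.in_pool(item, perf)
        p.cut()
        self.assertEqual(p.perfs, [2, 3])
        self.assertEqual(p.items, ['c', 'a'])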
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
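    # Hedged sketch: on a continuous axis, space_around_centre clips [c - r, c + r]
    # to the axis bounds, e.g. centre 7 with radius 3.9 on (0., 10.) -> (3.1, 10.0),
    # matching the "points on edge" case above.
    def test_centre_clip_sketch(self):
        c, r = 7., 3.9
        self.assertAlmostEqual(max(0., c - r), 3.1)
        self.assertAlmostEqual(min(10., c + r), 10.0)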
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock market is "主板", and list date on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all list dates, industries and areas match the given filters\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the reference results below were computed manually in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # build a 500-point test series to test evaluation with more than 250 data points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print('test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print('Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
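# For reference, a minimal sketch of the final-value metric exercised above.
# This is an assumption inferred from the assertions, not qteasy's actual code:
# eval_fv appears to return the last entry of the 'value' column, or -inf for
# an empty DataFrame.
def _fv_sketch(df):
    if df.empty:
        return -np.inf
    return df['value'].iloc[-1]
self.assertAlmostEqual(_fv_sketch(self.test_data7), eval_fv(self.test_data7))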
def test_max_drawdown(self):
print('test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print('Error testing')
self.assertRaises(AssertionError, eval_max_drawdown, 15)
self.assertRaises(KeyError,
                  eval_max_drawdown,
                  pd.DataFrame([1, 2, 3], columns=['non_value']))
# test the case where the value series is shifted below zero (test_data4 - 5):
# TODO: investigate how division by a zero or negative peak value changes the result
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
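# A minimal sketch of the drawdown arithmetic verified above; this is an
# assumed convention, not qteasy's implementation: drawdown at each point is
# 1 - value / running_peak, and the max drawdown is the largest such gap.
# Illustration only, not asserted, since the library may differ in edge cases.
values = self.test_data1['value'].values
sketch_mdd = np.max(1 - values / np.maximum.accumulate(values))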
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
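# A minimal sketch of the information-ratio convention assumed by this test
# (an inference, not qteasy's actual code): mean active return divided by the
# standard deviation of the active return (the tracking error). Not asserted.
ret = self.test_data2['value'].pct_change()
ref_ret = reference['value'].pct_change()
active_ret = ret - ref_ret
sketch_info_ratio = active_ret.mean() / active_ret.std()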
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
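# A hedged sketch of the rolling volatility series above. Assumed convention,
# inferred from the 250 leading NaNs: the standard deviation of log returns
# over a 250-day window, annualized with sqrt(250). Illustration only.
log_ret = np.log(self.long_data['value'] / self.long_data['value'].shift(1))
sketch_vol_roll = log_ret.rolling(250).std() * np.sqrt(250)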
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
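# A hedged sketch of the rolling Sharpe ratio above. Assumed convention, not
# qteasy's actual code: (mean daily return - daily risk-free rate) divided by
# the standard deviation of daily returns, over a 250-day window. The 0.00035
# mirrors the risk-free rate passed to eval_sharp above. Illustration only.
ret = self.long_data['value'].pct_change()
sketch_sharp_roll = (ret.rolling(250).mean() - 0.00035) / ret.rolling(250).std()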
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
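# A hedged sketch of the rolling beta above. Assumed convention, not qteasy's
# actual code: covariance of the strategy's daily returns with the benchmark's
# daily returns, divided by the variance of the benchmark returns, over a
# 250-day window. Illustration only, not asserted.
ret = self.long_data['value'].pct_change()
ref_ret = self.long_bench['value'].pct_change()
sketch_beta_roll = ret.rolling(250).cov(ref_ret) / ref_ret.rolling(250).var()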
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
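# A hedged sketch of the CAPM-style alpha tested above. Assumed convention,
# not qteasy's actual code: strategy return minus the return predicted by
# beta against the benchmark at risk-free rate rf. The inputs below are
# hypothetical numbers for illustration only.
def _alpha_sketch(strategy_ret, benchmark_ret, beta, rf=0.0):
    return strategy_ret - (rf + beta * (benchmark_ret - rf))
sketch_alpha = _alpha_sketch(0.15, 0.10, 1.2, rf=0.03)  # -> 0.036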
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
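# A hedged sketch of the two figures returned above, inferred from the
# assertions rather than from qteasy's implementation: tr looks like the
# benchmark's total return over the period and yr its annualized return,
# assuming 250 trading days per year. Illustration only, not asserted.
ref_values = reference['value']
sketch_tr = ref_values.iloc[-1] / ref_values.iloc[0] - 1
sketch_yr = (1 + sketch_tr) ** (250 / len(ref_values)) - 1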
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
# carefully designed mock share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
# carefully designed mock PT (position target) holding signals:
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
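# sanity sketch for the PT semantics assumed here: each row is a set of target
# position weights, so no row should allocate more than 100% of total assets
# (the remainder is held as cash); this is an assumption, not qteasy's spec
assert (self.pt_signals.sum(axis=1) <= 1.0 + 1e-6).all()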
# carefully designed mock PS (proportional trade) signals, closely resembling the PT signals above
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
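# sanity sketch for the PS semantics assumed here: entries are trade
# proportions, where a positive value buys with that fraction of assets and a
# negative value sells that fraction of the current holding, so -1.0 clears
# the position entirely; this is an assumption, not qteasy's spec
assert self.ps_signals.min() >= -1.0 and self.ps_signals.max() <= 1.0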
# carefully designed mock VS (share volume) trade signals, similar to the PS signals above
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
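# sanity sketch (assumption: one signal row per trading day and one column per
# share, so all three signal matrices must align with the 100 mock trading
# dates and 7 mock shares defined above)
for sig in (self.pt_signals, self.ps_signals, self.vs_signals):
    assert sig.shape == (len(self.dates), len(self.shares))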
# carefully designed mock multi-price trade signals, simulating operations on three shares over 50 trading days
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
# the trade prices used for these operations include the open, high, and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
        # The trading signals come in three groups, applied to the open, high and close prices respectively
        # The key point here is the handling of the stock delivery period: when it is non-zero, delivery settles in units of trading days
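        # A minimal sketch of day-based delivery, assuming shares bought on day t only
        # become sellable n_s trading days later, and cash from a sale only becomes
        # spendable n_c days later (illustrative only, not qteasy's exact internals):
        #     available_shares[t] = holdings[t - n_s]
        #     available_cash[t]   = cash[t - n_c]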
self.multi_signals = []
        # The first group in multi_signals holds the open-price signals
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The second group holds the high-price signals
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The third group holds the close-price signals
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
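        # (Assumed signal convention: a positive fraction buys in that proportion,
        #  a negative fraction sells that proportion of the current holding, so
        #  -1.000 liquidates the position entirely.)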
        # Backtesting likewise needs three groups of prices: open, high and close
self.multi_histories = []
        # The first group in multi_histories holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The second group holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # The third group holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # Set up the backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
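        # rate is a frictionless cost model, so backtest results can be checked
        # against hand-computed figures; rate2 adds minimum fees (buy_min=10,
        # sell_min=5) to exercise the minimum-fee handling of the backtest loop.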
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
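        # dataframe_to_hp wraps a single DataFrame as a one-htype HistoryPanel,
        # while stack_dataframes stacks several DataFrames along the htype axis
        # (here 'open, high, close') into the 3-D panels the backtest loop expects.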
        # Expected backtest results for the simulated PT signals
        # PT signals, sell before buy, delivery period 0
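        # Each row of the expected-result arrays below appears to hold the seven
        # share positions followed by cash, fee and total portfolio value -- an
        # assumption consistent with the first row, where 555.5556 shares at an
        # implied price of 4.5 (2500) plus 7500 cash gives the 10000 starting capital.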
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signals, buy before sell, delivery period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
        # PT signals, sell before buy, delivery period of 2 days for stock and 0 days for cash, so the proceeds of a sale can immediately fund further purchases
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
        # PT signals, buy before sell, delivery period of 2 days for stock and 1 day for cash
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
        # Expected backtest results for the simulated PS signals
        # PS signals, sell before buy, delivery period 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
             [1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
        # PS signals, buy before sell, delivery period 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signals, sell first then buy, delivery period: 2 days (stock), 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signals, buy first then sell, delivery period: 2 days (stock), 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# simulated backtest results for VS signals
# VS signals, sell first then buy, delivery period 0 days
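# (the VS arrays hold round share lots such as 400/500/1100, which is
# consistent with VS signals specifying trade volumes directly rather
# than position proportions)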
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signals, buy first then sell, delivery period 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signals, sell first then buy, delivery period: 2 days (stock), 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# VS signals, buy first then sell, delivery period: 2 days (stock), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # Multi-signal processing results, sell first then buy, reusing proceeds from sales
        # for purchases, delivery period: 2 days (stocks), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
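        # Note on layout: rows of the 10-column expected-result arrays appear to hold
        # 7 per-asset holdings, then cash (index 7), a fee column (index 8) and total
        # value (index 9); the assertions below index [0:7] for amounts and [7] for cash.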
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
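        # sanity check: 10000 - 7500 = 2500 of cash (fees included) buys 555.56 shares
        # of asset 4, implying an effective unit cost of roughly 4.5 on day 1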
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.pt_res_sb00[95][7] + c_g + c_s
        amounts = self.pt_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
        cash = self.pt_res_bs00[95][7] + c_g + c_s
        amounts = self.pt_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
"""
This module contains the HomogeneousReactor class that provides a high-level interface for a variety of 0-D reactors
"""
# Spitfire - a Python-C++ library for building tabulated chemistry models and solving differential equations
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
#
# You should have received a copy of the 3-clause BSD License
# along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
#
# Questions? Contact <NAME> (<EMAIL>)
from spitfire.time.integrator import odesolve
from spitfire.time.methods import KennedyCarpenterS6P4Q3
from spitfire.time.nonlinear import SimpleNewtonSolver
from spitfire.time.stepcontrol import PIController
from spitfire.chemistry.library import Dimension, Library
import numpy as np
from numpy import zeros, hstack, sqrt, sum
from scipy.linalg.lapack import dgetrf as lapack_lu_factor
from scipy.linalg.lapack import dgetrs as lapack_lu_solve
from scipy.sparse.linalg import splu as superlu_factor
from scipy.sparse import csc_matrix as sparse_csc_matrix
import matplotlib.pyplot as plt
class HomogeneousReactor(object):
"""A class for solving zero-dimensional reactors
**Constructor**: specify a mechanism, initial mixture, and reactor specifications
Parameters
----------
mech_spec : spitfire.chemistry.mechanism.ChemicalMechanismSpec instance
the mechanism
initial_mixture : Cantera.Quantity (a Spitfire stream) or Cantera.Solution object
the initial mixture of the reactor
configuration : str
whether the reactor is constant-volume (isochoric) or constant-pressure (isobaric)
heat_transfer : str
whether the reactor is adiabatic, isothermal, or diathermal (finite-rate heat transfer by convection and/or radiation)
mass_transfer : str
whether the reactor is closed or open
convection_temperature : float or callable
the temperature of external fluid in a diathermal reactor, either a constant or a function of time, f(t)
radiation_temperature : float or callable
the temperature of external radiation body of a diathermal reactor, either a constant or a function of time, f(t)
convection_coefficient : float or callable
the convection coefficient, either a constant or a function of time, f(t)
radiative_emissivity : float or callable
the radiative emissivity, either a constant or a function of time, f(t)
    shape_dimension_dict : dict
The shape and dimension of a diathermal reactor. The shape is one of 'cube', 'sphere', 'capsule', 'tetrahedron', 'octahedron', or 'icosahedron'
(see `wikipedia <https://en.wikipedia.org/wiki/Surface-area-to-volume_ratio#Mathematical_examples>`_).
The dimension is either the characteristic length ('char. length') in meters or volume ('volume') in cubic meters.
mixing_tau : float or callable
The mixing time of an open reactor, either a constant or a function of time, f(t)
feed_temperature : float or callable
the temperature of the feed stream of an open reactor, either a constant or a function of time, f(t)
feed_mass_fractions : np.array or callable
the mass fractions of the feed stream of an open reactor, either a constant or a function of time, f(t)
feed_density : float or callable
the density of the feed stream of an open reactor, either a constant or a function of time, f(t)
rates_sensitivity_type : str
how the chemical source term Jacobian is formed, either 'dense' or 'sparse' for exact formulations
or 'no-TBAF' which ignores third-body and falloff sensitivities. The default is 'dense'.
For large mechanisms (over 100 species) the 'sparse' formulation is far faster than 'dense',
especially for mechanisms of more than 300 species.
sensitivity_transform_type : str
how the Jacobian is transformed for isobaric systems, currently only 'exact' is supported
initial_time : float
the starting time point (in seconds) of the reactor, default to 0.0
"""
_configurations = ['constant pressure', 'constant volume', 'isobaric', 'isochoric']
_configuration_dict = {'constant pressure': 'isobaric',
'isobaric': 'isobaric',
'constant volume': 'isochoric',
'isochoric': 'isochoric'}
_heat_transfers = ['adiabatic', 'isothermal', 'diathermal']
_mass_transfers = ['closed', 'open']
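    # Surface-area-to-volume ratio (1/m) for each supported shape, computed either
    # from the characteristic length ('l->sov') or from the volume ('v->sov');
    # this factor scales the convective/radiative terms of diathermal reactors.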
_shape_dict = {'cube': {'l->sov': lambda a: 6. / a,
'v->sov': lambda v: 6. / (np.power(v, 1. / 3.))},
'sphere': {'l->sov': lambda a: 3. / a,
'v->sov': lambda v: 3. / (np.power(3. * v / (4. * np.pi), 1. / 3.))},
'capsule': {'l->sov': lambda a: 12. / (5. * a),
'v->sov': lambda v: 12. / (5. * np.power(3. * v / (10. * np.pi), 1. / 3.))},
'tetrahedron': {'l->sov': lambda a: 6. * sqrt(6.) / a,
'v->sov': lambda v: 6. * sqrt(6.) / np.power(12. * v / np.sqrt(2.), 1. / 3.)},
'octahedron': {'l->sov': lambda a: 3. * sqrt(6.) / a,
'v->sov': lambda v: 3. * sqrt(6.) / np.power(3. * v / np.sqrt(2.), 1. / 3.)},
'icosahedron': {'l->sov': lambda a: 12. * sqrt(3.) / ((3. + sqrt(5.)) * a),
'v->sov': lambda v: 12. * sqrt(3.) / (
(3. + sqrt(5.)) * np.power(12. * v / 5. / (3. + sqrt(5.)), 1. / 3.))}}
_shapes = list(_shape_dict.keys())
@classmethod
def _check_constructor_argument(cls, argument, description, acceptable_values):
if argument.lower() in acceptable_values:
return True
else:
raise ValueError(
"""
Error in reactor construction:
Bad {:} argument detected.
Argument given: {:}
Acceptable values: {:}
""".format(description, argument, acceptable_values))
@classmethod
def _warn_unused_argument(cls, argument_should_be_none, unused_argument, reason):
if argument_should_be_none is not None:
print(
"""
Warning in reactor construction:
The {:} argument is unused.
Reason: {:}
""".format(unused_argument, reason))
@classmethod
def _check_necessary_argument(cls, argument_should_be_not_none, unspecified_argument, reason):
if argument_should_be_not_none is None:
raise ValueError(
"""
Error in reactor construction:
The {:} argument is needed but was unspecified.
Reason: {:}
""".format(unspecified_argument, reason))
else:
return True
def __init__(self,
mech_spec,
initial_mixture,
configuration,
heat_transfer,
mass_transfer,
convection_temperature=None,
radiation_temperature=None,
convection_coefficient=None,
radiative_emissivity=None,
shape_dimension_dict=None,
mixing_tau=None,
feed_temperature=None,
feed_mass_fractions=None,
feed_density=None,
rates_sensitivity_type='dense',
sensitivity_transform_type='exact',
initial_time=0.):
# check configuration (isobaric/isochoric), heat transfer, and mass transfer
if self._check_constructor_argument(configuration, 'configuration', self._configurations):
self._configuration = self._configuration_dict[configuration.lower()]
if self._check_constructor_argument(heat_transfer, 'heat transfer', self._heat_transfers):
self._heat_transfer = heat_transfer.lower()
if self._check_constructor_argument(mass_transfer, 'mass transfer', self._mass_transfers):
self._mass_transfer = mass_transfer.lower()
# save heat transfer parameters, check validity, and warn for unused parameters
if self._heat_transfer == 'adiabatic' or self._heat_transfer == 'isothermal':
self._convection_temperature = 0.
self._radiation_temperature = 0.
self._convection_coefficient = 0.
self._radiative_emissivity = 0.
self._surface_area_to_volume = 0.
message = 'heat transfer is not set to diathermal'
self._warn_unused_argument(convection_temperature, 'convection_temperature', message)
self._warn_unused_argument(radiation_temperature, 'radiation_temperature', message)
self._warn_unused_argument(convection_coefficient, 'convection_coefficient', message)
self._warn_unused_argument(radiative_emissivity, 'radiative_emissivity', message)
self._warn_unused_argument(shape_dimension_dict, 'shape_dimension_dict', message)
else:
message = 'heat transfer is set to diathermal'
if self._check_necessary_argument(convection_temperature, 'convection_temperature', message):
self._convection_temperature = convection_temperature
if self._check_necessary_argument(radiation_temperature, 'radiation_temperature', message):
self._radiation_temperature = radiation_temperature
if self._check_necessary_argument(convection_coefficient, 'convection_coefficient', message):
self._convection_coefficient = convection_coefficient
if self._check_necessary_argument(radiative_emissivity, 'radiative_emissivity', message):
self._radiative_emissivity = radiative_emissivity
if self._check_necessary_argument(shape_dimension_dict, 'shape_dimension_dict', message):
if 'shape' not in shape_dimension_dict:
raise ValueError(
"""
Error in reactor construction:
The shape_dimension_dict argument did not have the required ''shape'' key
""")
else:
self._check_constructor_argument(shape_dimension_dict['shape'], 'shape', self._shapes)
if 'char. length' not in shape_dimension_dict and 'volume' not in shape_dimension_dict:
raise ValueError(
"""
Error in reactor construction:
The shape_dimension_dict argument did not have one of the required ''char. length'' or ''volume'' keys
""")
elif 'char. length' in shape_dimension_dict and 'volume' in shape_dimension_dict:
raise ValueError(
"""
Error in reactor construction:
                    The shape_dimension_dict argument had both of the ''char. length'' and ''volume'' keys. Only one is allowed.
""")
if 'char. length' in shape_dimension_dict:
method = self._shape_dict[shape_dimension_dict['shape']]['l->sov']
self._surface_area_to_volume = method(shape_dimension_dict['char. length'])
elif 'volume' in shape_dimension_dict:
method = self._shape_dict[shape_dimension_dict['shape']]['v->sov']
self._surface_area_to_volume = method(shape_dimension_dict['volume'])
# save mass transfer specifics and check validity
if self._mass_transfer == 'closed':
self._mixing_tau = 0.
self._feed_temperature = 0.
self._feed_mass_fractions = np.ndarray(1)
self._feed_density = 0.
message = 'mass transfer is not set to open'
self._warn_unused_argument(mixing_tau, 'mixing_tau', message)
self._warn_unused_argument(feed_temperature, 'feed_temperature', message)
self._warn_unused_argument(feed_mass_fractions, 'feed_mass_fractions', message)
self._warn_unused_argument(feed_density, 'feed_density', message)
else:
message = 'mass transfer is set to open'
if self._check_necessary_argument(mixing_tau, 'mixing_tau', message):
                self._mixing_tau = mixing_tau
if self._check_necessary_argument(feed_temperature, 'feed_temperature', message):
self._feed_temperature = feed_temperature
if self._check_necessary_argument(feed_mass_fractions, 'feed_mass_fractions', message):
self._feed_mass_fractions = feed_mass_fractions
if self._configuration == 'isobaric':
self._warn_unused_argument(feed_density, 'feed_density',
message + ' but the reactor is isobaric, so feed_density is not needed')
self._feed_density = feed_density
else:
if self._check_necessary_argument(feed_density, 'feed_density', message):
self._feed_density = feed_density
# look for parameters that are functions of time
self._parameter_time_functions = set()
for attribute in ['_convection_temperature',
'_radiation_temperature',
'_convection_coefficient',
'_radiative_emissivity',
'_mixing_tau',
'_feed_temperature',
'_feed_mass_fractions',
'_feed_density']:
if callable(getattr(self, attribute)):
self._parameter_time_functions.add(attribute)
self._tc_is_timevar = '_convection_temperature' in self._parameter_time_functions
self._tr_is_timevar = '_radiation_temperature' in self._parameter_time_functions
self._cc_is_timevar = '_convection_coefficient' in self._parameter_time_functions
self._re_is_timevar = '_radiative_emissivity' in self._parameter_time_functions
self._tau_is_timevar = '_mixing_tau' in self._parameter_time_functions
self._tf_is_timevar = '_feed_temperature' in self._parameter_time_functions
self._yf_is_timevar = '_feed_mass_fractions' in self._parameter_time_functions
self._rf_is_timevar = '_feed_density' in self._parameter_time_functions
self._tc_value = self._convection_temperature(0.) if self._tc_is_timevar else self._convection_temperature
self._cc_value = self._convection_coefficient(0.) if self._cc_is_timevar else self._convection_coefficient
self._tr_value = self._radiation_temperature(0.) if self._tr_is_timevar else self._radiation_temperature
self._re_value = self._radiative_emissivity(0.) if self._re_is_timevar else self._radiative_emissivity
self._tau_value = self._mixing_tau(0.) if self._tau_is_timevar else self._mixing_tau
self._tf_value = self._feed_temperature(0.) if self._tf_is_timevar else self._feed_temperature
self._yf_value = self._feed_mass_fractions(0.) if self._yf_is_timevar else self._feed_mass_fractions
self._rf_value = self._feed_density(0.) if self._rf_is_timevar else self._feed_density
self._rates_sensitivity_option = {'dense': 0, 'no-TBAF': 1, 'sparse': 2}[rates_sensitivity_type]
self._sensitivity_transform_option = {'exact': 0}[sensitivity_transform_type]
self._is_open = self._mass_transfer == 'open'
self._heat_transfer_option = {'adiabatic': 0, 'isothermal': 1, 'diathermal': 2}[self._heat_transfer]
self._mechanism = mech_spec
self._griffon = self._mechanism.griffon
self._initial_pressure = initial_mixture.P
self._current_pressure = np.copy(self._initial_pressure)
self._initial_temperature = initial_mixture.T
self._current_temperature = np.copy(self._initial_temperature)
self._initial_mass_fractions = initial_mixture.Y
self._current_mass_fractions = np.copy(self._initial_mass_fractions)
self._initial_time = np.copy(initial_time)
self._current_time = np.copy(self._initial_time)
self._n_species = self._mechanism.n_species
self._n_reactions = self._mechanism.n_reactions
self._n_equations = self._n_species if self._configuration == 'isobaric' else self._n_species + 1
self._initial_state = zeros(self._n_equations)
if self._configuration == 'isobaric':
self._temperature_index = 0
self._initial_state[0] = np.copy(initial_mixture.T)
self._initial_state[1:] = np.copy(initial_mixture.Y[:-1])
elif self._configuration == 'isochoric':
self._temperature_index = 1
self._initial_state[0] = np.copy(initial_mixture.density)
self._initial_state[1] = np.copy(initial_mixture.T)
self._initial_state[2:] = np.copy(initial_mixture.Y[:-1])
self._current_state = np.copy(self._initial_state)
self._variable_scales = np.ones(self._n_equations)
self._variable_scales[self._temperature_index] = 1.e3
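        # temperature is O(1e3) K while mass fractions are O(1); scaling the temperature
        # variable equalizes per-equation weights in the solver/step-controller error norms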
self._left_hand_side_inverse_operator = None
self._diag_indices = np.diag_indices(self._n_equations)
self._extra_logger_title_line1 = f'{"":<10} | {"":<10}|'
self._extra_logger_title_line2 = f' {"T (K)":<8} | {"T-T_0 (K)":<10}|'
def _update_heat_transfer_parameters(self, t):
self._tc_value = self._convection_temperature(t) if self._tc_is_timevar else self._tc_value
self._cc_value = self._convection_coefficient(t) if self._cc_is_timevar else self._cc_value
self._tr_value = self._radiation_temperature(t) if self._tr_is_timevar else self._tr_value
self._re_value = self._radiative_emissivity(t) if self._re_is_timevar else self._re_value
def _update_mass_transfer_parameters(self, t):
self._tau_value = self._mixing_tau(t) if self._tau_is_timevar else self._tau_value
self._tf_value = self._feed_temperature(t) if self._tf_is_timevar else self._tf_value
self._yf_value = self._feed_mass_fractions(t) if self._yf_is_timevar else self._yf_value
def _lapack_setup_wrapper(self, jacobian_method, state, prefactor):
j = jacobian_method(state) * prefactor
j[self._diag_indices] -= 1.
self._left_hand_side_inverse_operator = lapack_lu_factor(j)[:2]
def _superlu_setup_wrapper(self, jacobian_method, state, prefactor):
j = jacobian_method(state)
j *= prefactor
j[self._diag_indices] -= 1.
j = sparse_csc_matrix(j)
j.eliminate_zeros()
self._left_hand_side_inverse_operator = superlu_factor(j)
def _lapack_solve(self, residual):
return lapack_lu_solve(self._left_hand_side_inverse_operator[0],
self._left_hand_side_inverse_operator[1],
residual)[0], 1, True
def _superlu_solve(self, residual):
return self._left_hand_side_inverse_operator.solve(residual), 1, True
def _extra_logger_log(self, state, *args, **kwargs):
T = state[self._temperature_index]
T0 = self.initial_temperature
return f'{T:>10.2f} | {T - T0:>10.2f}|'
@property
def initial_state(self):
"""Obtain this reactor's initial state vector"""
return self._initial_state
@property
def current_state(self):
"""Obtain this reactor's final state vector"""
return self._current_state
@property
def initial_temperature(self):
"""Obtain this reactor's initial temperature"""
return self._initial_temperature
@property
def current_temperature(self):
"""Obtain this reactor's current temperature"""
return self._current_temperature
@property
def initial_pressure(self):
"""Obtain this reactor's initial pressure"""
return self._initial_pressure
@property
def current_pressure(self):
"""Obtain this reactor's current pressure"""
return self._current_pressure
@property
def initial_mass_fractions(self):
"""Obtain this reactor's initial mass fractions"""
return self._initial_mass_fractions
@property
def current_mass_fractions(self):
"""Obtain this reactor's current mass fractions"""
return self._current_mass_fractions
@property
def initial_time(self):
"""Obtain this reactor's initial mass fractions"""
return self._initial_time
@property
def current_time(self):
"""Obtain this reactor's current mass fractions"""
return self._current_time
@property
def n_species(self):
return self._n_species
@property
def n_reactions(self):
return self._n_reactions
@classmethod
    def get_supported_reactor_shapes(cls):
"""Obtain a list of supported reactor geometries"""
return HomogeneousReactor._shape_dict.keys()
def integrate(self,
stop_at_time=None,
stop_at_steady=None,
stop_criteria=None,
first_time_step=1.e-6,
max_time_step=1.e6,
minimum_time_step_count=40,
transient_tolerance=1.e-10,
write_log=False,
log_rate=100,
maximum_steps_per_jacobian=1,
nonlinear_solve_tolerance=1.e-12,
linear_solver='lapack',
plot=None,
stepper_type=KennedyCarpenterS6P4Q3,
nlsolver_type=SimpleNewtonSolver,
stepcontrol_type=PIController,
extra_integrator_args=dict(),
extra_stepper_args=dict(),
extra_nlsolver_args=dict(),
extra_stepcontrol_args=dict(),
save_first_and_last_only=False):
"""Base method for reactor integration
Parameters
----------
stop_at_time : float
The final time to stop the simulation at
stop_at_steady : float
The tolerance at which a steady state is decided upon and stopped at
stop_criteria : callable (t, state, residual, n_steps)
Any callable that returns True when the simulation should stop
first_time_step : float
The time step size initially used by the time integrator
max_time_step : float
The largest time step the time stepper is allowed to take
minimum_time_step_count : int
The minimum number of time steps to run (helpful for slowly evolving simulations, for instance those with low starting temperatures)
transient_tolerance : float
the target temporal error for transient integration
write_log : bool
whether or not to print integration statistics and status during the simulation
log_rate : int
how often to print log information
maximum_steps_per_jacobian : int
maximum number of steps Spitfire allows before the Jacobian must be re-evaluated - keep low for robustness, try to increase for performance on large mechanisms
nonlinear_solve_tolerance : float
tolerance for the nonlinear solver used in implicit time stepping (optional, default: 1e-12)
linear_solver : str
which linear solver to use, at the moment either 'lapack' (dense, direct) or 'superlu' (sparse, direct) are available
plot : list
List of variables (temperature and/or specific species names) to be plotted after the time integration completes.
No plot is shown if a list is not provided.
Temperature is plotted in the first subplot if any list of variables is provided for plotting (even if temperature is not specified in the list of variables).
Species mass fractions will be plotted in a second subplot if any species names are provided in the list of variables.
stepper_type : spitfire.time.TimeStepper
which (single step) stepper method to use (optional, default: ESDIRK64)
nlsolver_type : spitfire.time.NonlinearSolver
which nonlinear solver method to use (optional, default: SimpleNewtonSolver)
stepcontrol_type : spitfire.time.StepControl
which time step adaptation method to use (optional, default: PIController)
extra_integrator_args : dict
any extra arguments to specify to the time integrator - arguments passed to the odesolve method
extra_stepper_args : dict
extra arguments to specify on the spitfire.time.TimeStepper object
extra_nlsolver_args : dict
extra arguments to specify on the spitfire.time.NonlinearSolver object
extra_stepcontrol_args : dict
extra arguments to specify on the spitfire.time.StepControl object
save_first_and_last_only : bool
whether or not to retain all data (False, default) or only the first and last solutions
Returns
-------
a library containing temperature, mass fractions, and density (isochoric) or pressure (isobaric) over time
"""
def post_step_callback(t, state, *args):
state[state < 0.] = 0.
return state
integrator_args = {'stop_criteria': stop_criteria}
if stop_at_time is not None:
integrator_args.update({'stop_at_time': stop_at_time})
if stop_at_steady is not None:
integrator_args.update({'stop_at_steady': stop_at_steady})
integrator_args.update(extra_integrator_args)
# build the step controller and set attributes
step_control_args = {'first_step': first_time_step,
'max_step': max_time_step,
'target_error': transient_tolerance}
step_control_args.update(extra_stepcontrol_args)
step_controller = stepcontrol_type(**step_control_args)
# build the nonlinear solver and set attributes
nonlinear_solver_args = {'evaluate_jacobian_every_iter': False,
'norm_weighting': 1. / self._variable_scales,
'tolerance': nonlinear_solve_tolerance}
nonlinear_solver_args.update(extra_nlsolver_args)
newton_solver = nlsolver_type(**nonlinear_solver_args)
# build the stepper method and set attributes
stepper_args = {'nonlinear_solver': newton_solver, 'norm_weighting': 1. / self._variable_scales}
stepper_args.update(extra_stepper_args)
stepper = stepper_type(**stepper_args)
# build the rhs and projector methods and do the integration
if self._configuration == 'isobaric':
def rhs_method(time, state):
k = np.zeros(self._n_equations)
self._update_mass_transfer_parameters(time)
self._update_heat_transfer_parameters(time)
self._griffon.reactor_rhs_isobaric(state, self._initial_pressure,
self._tf_value, self._yf_value, self._tau_value,
self._tc_value, self._tr_value,
self._cc_value, self._re_value,
self._surface_area_to_volume,
self._heat_transfer_option, self._is_open, k)
return k
def jac_method(state):
                k = np.zeros(self._n_equations)
import os
import torch
import torchvision
import matplotlib.pyplot as plt
import numpy as np
import json
import math
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from PIL import Image
from ipywidgets import widgets, interact
'''
Utils that do not serve a broader purpose, and generally are used for visualization or otherwise
'''
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def visualizeDataset(dataloader):
'''
Visualize a batch of tensors
'''
images, labels = next(iter(dataloader))
plt.imshow(torchvision.utils.make_grid(images, nrow=8).permute(1, 2, 0))
def visualizeBatch(dataloader, normalized):
'''
Visualize all the images in a batch in a subplot
Visualize one image as its own figure
'''
images, labels = next(iter(dataloader))
#print(images.shape) # [batch size, channels, depth, height, width]
img = images[0]
if len(img.shape) > 3:
#img = img.permute(0,2,1,3)
img = np.squeeze(img.numpy())
lab = np.squeeze(labels[0])
classes = ['s1', 'pct', 'tal', 'dct', 'cd', 'cd45', 'nestin', '31glom', '31int']
def update_layer(layer = 0):
plt.imshow(img[layer], cmap ='gray')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.title("Class is : " + classes[lab])
plt.imshow(img[0], cmap ='gray')
interact(update_layer, layer=widgets.IntSlider(min=0,max=img.shape[0]-1,step=1,value=0))
'''
for i in range(img.shape[1]):
img32 = img[0][i]
#print(img32.shape)
#img32 = (img32 + abs(np.amin(img32))) / (abs(np.amin(img32))+abs(np.amax(img32)))
img32 = Image.fromarray(img32)
plt.imshow(img32)
plt.show()
'''
    return  # NOTE: this early return means the batch-grid plotting below never runs
img = unnormTensor(images[0], normalized)
plt.imshow(img, cmap='gray')
plt.show()
plt.hist(np.ravel(img), 255, range=[0.01,1])
plt.show()
fig = plt.figure(figsize=(40, 40))
batch = math.ceil(math.sqrt(dataloader.batch_size))
for i in range(len(images)):
a = fig.add_subplot(batch,batch,i+1)
img = unnormTensor(images[i], normalized)
imgplot = plt.imshow(img) #have to unnormalize data first!
plt.axis('off')
a.set_title("Label = " +str(labels[i].numpy()), fontsize=30)
def unnormTensor(tens, normalized):
'''
Takes a image tensor and returns the un-normalized numpy array scaled to [0,1]
'''
mean = [0.485, 0.456, 0.406]
std =[0.229, 0.224, 0.225]
img = tens.permute(1,2,0).numpy()
if normalized:
img = img*std + mean
if img.shape[2] == 1:
img = img.squeeze()
img = (img + abs(np.amin(img))) / (abs(np.amin(img))+abs(np.amax(img)))
return img
def visualizationOutGray(data, output, target, classes, normalized):
'''
Used to show the first test image in a batch with its label and prediction
Data size is batch_size, 1, 28, 28 (grayscale images!)
'''
    fig = plt.figure()
output_cpu = output.to(torch.device("cpu"))
target_cpu = target.to(torch.device("cpu"))
output_idx = (np.argmax(output_cpu[0], axis=0)) #reverse one hot
cls = classes[output_idx]
plt.title("Prediction = " + str(cls) + " | Actual = " + str(classes[target_cpu[0].numpy()]) )
data_cpu = data.to(torch.device("cpu"))
img = unnormTensor(data_cpu[0], normalized)
plt.imshow(img, cmap = 'gray')
plt.pause(0.05)
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
y_true = np.array(y_true).astype(int).reshape(-1)
y_pred = np.array(y_pred).astype(int).reshape(-1)
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
class_list = []
for item in unique_labels(y_true, y_pred): class_list.append(classes[item])
classes = class_list
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.show()
return ax
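# Minimal usage sketch (hypothetical labels and class names, not taken from this repo):
#   ax = plot_confusion_matrix(y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
#                              classes=['negative', 'positive'], normalize=True)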
def plot_confusion_matrix_combinePCT(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
y_true = np.array(y_true).astype(int).reshape(-1)
y_pred = np.array(y_pred).astype(int).reshape(-1)
y_true[y_true == 0] = 1
y_pred[y_pred == 0] = 1
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
accs = []
for i, row in enumerate(cm):
accs.append(cm[i,i] / np.sum(row))
print("Calculated balanced accuracy after combining PCT: " + str(np.mean(accs)))
# Only use the labels that appear in the data
class_list = []
for item in unique_labels(y_true, y_pred): class_list.append(classes[item])
classes = class_list
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    plt.show()
    return ax
import sys
import numpy as np
from astropy import units as u
from astropy.constants import G
from astropy.table import QTable
from scipy.integrate import solve_ivp
G = 6.674e-11  # Universal gravitational constant, SI (overrides the astropy.constants import above)
class Bodies:
def __init__(self):
self.posns = np.zeros((0,3))
self.vs = np.zeros((0,3))
self.ms = np.zeros((0))
self.rs = np.zeros((0))
self.sun = None
self.planets = []
self.nBodies = 0
self.time = 0
#----------------------------------------
# Some general utility methods
def is_iterable(self, x):
# a surprising omission from standard Python?
try:
iterator = iter(x)
except TypeError:
return False
else:
return True
def fix_units(self, val, unit):
"Convert to SI if necessary and return value as a Python scalar"
if isinstance(val, u.quantity.Quantity):
val = val.to(unit).value
return val
def veclen(self, vector):
# beware, units are lost and this just returns a number
return np.linalg.norm(vector)
def vecperp(self, vector):
"rotate 90 deg ccw, return normalised vector in x,y plane"
v = np.array([-vector[1], vector[0], 0])
return v/self.veclen(v)
def get_time(self):
"Cumulative integration time (s)"
return self.time*u.s
def CoM_velocity(self):
"Reruns velocity of center of mass"
return np.sum(self.vs * self.ms[:, np.newaxis], axis=0) / np.sum(self.ms)
def fix_CoM(self):
"Ensure CoM velocity is zero"
self.vs -= self.CoM_velocity()
#-----------------------------------------------------
# Several methods to add bodies to the collection
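    # Typical setup (a minimal sketch with illustrative values, not from this file):
    #   b = Bodies()
    #   b.add_sun(M=2.0e30, R=7.0e8)
    #   b.add_planet_at_pericenter(a=1.5e11, e=0.0167, m=6.0e24, R=6.4e6)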
def add_sun(self, M, R=None):
"""
For 1-body problems, a large mass fixed at the origin
M = mass (kg or Quantity)
R = radius (m or Quantity); only for collision detection
"""
M = self.fix_units(M, u.kg)
R = self.fix_units(R, u.m)
self.sun = self.ms.size # index to this new body
self.posns = np.concatenate((self.posns, np.zeros((1,3))))
self.vs = np.concatenate((self.vs, np.zeros((1,3))))
self.ms = np.concatenate((self.ms, [M,]))
self.rs = np.concatenate((self.rs, [R,]))
self.nBodies = self.ms.size
def add_bodies(self, pos, v, m, R):
"""
Can be one body or many
single: need pos and v to be 3-item iterables
        many: pos and v have shape (N,3), m and R are 1-D array-like
"""
if not self.is_iterable(m): # just have a single body
# make sure the 3-vectors are numpy arrays
# (this does nothing if they are already arrays)
pos = np.array(pos)
v = np.array(v)
# get everything to a suitable shape for concatenation
            pos = pos[np.newaxis, :]  # converts shape (3,) to (1,3)
v = v[np.newaxis,:]
m = [m,]
R = [R,]
self.posns = np.concatenate((self.posns, pos))
self.vs = np.concatenate((self.vs, v))
        self.ms = np.concatenate((self.ms, m))
        self.rs = np.concatenate((self.rs, R))
self.nBodies = self.ms.size
def add_planet_at_pericenter(self, a, e, i=0, phi=0, m=None, R=None):
"""
For setting up a 1-body Keplerian orbit.
a = semimajor axis (m or Quantity)
e = eccentricity
i = inclination (deg)
phi = orientation of perihelion (deg ccw from x-axis)
m = mass (kg or Quantity); only req if an N-body calc will be run
R = radius (m or Quantity); only for collision detection
"""
if self.sun is None:
display("Error: Please run add_sun() first")
return
else:
M_sun = self.ms[self.sun]
a = self.fix_units(a, u.m)
m = self.fix_units(m, u.kg)
R = self.fix_units(R, u.m)
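        # orbital period from Kepler's third law: P^2 = 4 pi^2 a^3 / (G M_sun)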
        P = np.sqrt(4 * np.pi**2 * a**3 / (G * M_sun))
"""
source localization support
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/localization.py,v 1.34 2018/01/27 15:37:17 burnett Exp $
"""
import os,sys
import numpy as np
from skymaps import SkyDir
from uw.like import quadform
from uw.utilities import keyword_options
from . import (sources, plotting )
def moment_analysis(tsmap, wcs, fudge=1.44):
""" perform localization by a moment analysis of a TS map
tsmap : array of float: TS values on a grid, must be square
wcs : Projector object
implements pix2sph function of two ints to return ra,dec
fudge : float
Additional factor to multiply the ellipse radii
(Determined empirically)
returns:
ra, dec, ax, bx, ang
"""
    vals = np.exp(-0.5 * tsmap**2).flatten()
peak_fraction = vals.max()/sum(vals)
n = len(vals)
nx = ny =int(np.sqrt(n))
#centers of pixels have index +0.5
ix = np.array([ i % nx for i in range(n)]) +0.5
iy = np.array([ i //nx for i in range(n)]) +0.5
norm = 1./sum(vals)
t = [sum(u*vals)*norm for u in (ix,iy, ix**2, ix*iy, iy**2)]
center = (t[0],t[1])
C = np.matrix(center)
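    # second moments minus the outer product of first moments gives the covariance
    # of the TS-weighted pixel distribution, E[x x^T] - E[x] E[x]^T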
variance = (np.matrix(((t[2], t[3]),(t[3], t[4]))) - C.T * C)
ra,dec = wcs.pix2sph(center[0]+0.5,center[1]+0.5)
peak = SkyDir(ra,dec)
# get coords of center, measure degrees/pixel
nc = (nx+1)/2
rac, decc = wcs.pix2sph(nc, nc)
scale = wcs.pix2sph(nc, nc+1)[1] - decc
size = nx*scale
# adjust variance
variance = scale**2 * variance
offset = np.degrees(peak.difference(SkyDir(rac,decc)))
# add effects of binsize
var = variance #NO+ np.matrix(np.diag([1,1]))*(scale/3)**2
#Eigenvalue analysis to get ellipse coords
u,v =np.linalg.eigh(var)
ang =np.degrees(np.arctan2(v[1,1], -v[1,0]))
if min(u)< 0.5* max(u):
print ('Too elliptical : %s, setting circular' % u)
u[0]=u[1] = max(u)
tt = np.sqrt(u) * fudge
if u[1]>u[0]:
ax,bx = tt[1], tt[0]
ang = 90-ang
else:
ax,bx = tt
return ra, dec, ax,bx, ang
class MomentAnalysis(object):
""" localization using moment analysis
"""
def __init__(self, tsplot, fudge=1.44):
"""tsplot : TSPlot object
"""
self.tsp=tsplot
zea = tsplot.zea
wcs, tsmap = zea.projector, zea.image
self.ellipse = moment_analysis(tsmap, wcs, fudge)
def moments(self):
tsmap = self.tsp.zea.image
        vals = np.exp(-0.5 * tsmap**2).flatten()
peak_fraction = vals.max()/sum(vals)
n = len(vals)
nx = ny =int(np.sqrt(n))
ix = np.array([ i % nx for i in range(n)]) +0.5
iy = np.array([ i //nx for i in range(n)]) +0.5
norm = 1./sum(vals)
t = [sum(u*vals)*norm for u in (ix,iy, ix**2, ix*iy, iy**2)]
return t
def drawit(self):
self.tsp.overplot(self.ellipse, color='w', lw=2, ls='-', contours=[2.45])
self.tsp.plot(SkyDir(*self.ellipse[:2]), color='w', symbol='o' )
return self.tsp.zea.axes.figure
def full_localization(roi, source_name=None, ignore_exception=False,
update=False, associator=None, tsmap_dir='tsmap_fail', tsfits=False, delta_ts_bad=10):
import pylab as plt
source = roi.sources.find_source(source_name)
source.ellipsex = None # in case already had a moment analysis
tsp=None
with roi.tsmap_view(source.name) as tsm:
loc = Localization(tsm)
try:
if not loc.localize():
print ('Failed')
            if hasattr(loc, 'ellipse') and (update or (loc.ellipse['qual']<1.0 and loc.ellipse['a']<0.1)):
# Automatically update position if good fit.
t = loc.ellipse
prev = tsm.saved_skydir
tsm.saved_skydir = SkyDir(t['ra'], t['dec'])
print ('updated position: %s --> %s' % (prev, tsm.saved_skydir))
else:
print ('Failed localization')
except Exception as msg:
print ('Localization of %s failed: %s' % (source.name, msg))
if not ignore_exception: raise
if not roi.quiet and hasattr(loc, 'niter') and loc.niter>0:
print ('Localized %s: %d iterations, moved %.3f deg, deltaTS: %.1f' % \
(source.name, loc.niter, loc.delt, loc.delta_ts))
labels = 'ra dec a b ang qual'.split()
print ((len(labels)*'%10s') % tuple(labels))
p = loc.qform.par[0:2]+loc.qform.par[3:7]
print (len(p)*'%10.4f' % tuple(p))
if associator is not None:
try:
make_association(source, loc.TSmap, associator, quiet=roi.quiet)
except Exception as msg:
print ('Exception raised associating %s: %s' %(source.name, msg))
if tsmap_dir is not None :
if hasattr(loc,'ellipse'):
a, qual, delta_ts = loc.ellipse['a'], loc.ellipse['qual'], loc.delta_ts
tsize = min(a*15., 2.0)
bad = a>0.25 or qual>5 or abs(delta_ts)>delta_ts_bad
if bad:
                print ('Flagged as possibly bad: a=%.2f>0.25 or qual=%.1f>5 or abs(delta_ts=%.1f)>%.1f' % (a, qual, delta_ts, delta_ts_bad))
else:
print ('no localization')
bad = True
tsize= 2.0
if tsmap_dir.endswith('fail') and not bad: return
# Make tsmap and apply moment analysis if failed fit or quality cuts
done = False
while not done:
try:
tsp=plotting.tsmap.plot(loc, source.name, center=tsm.saved_skydir,
outdir=tsmap_dir, catsig=0, size=tsize,
pixelsize= tsize/15, # was 14: desire to have central pixel
assoc=source.__dict__.get('adict', None), # either None or a dictionary
notitle=True, #don't do title
markersize=10,
primary_markersize=12,
tsfits=tsfits,
)
zea = tsp.zea
wcs = zea.projector
tsmap = zea.image
                vals = np.exp(-0.5 * tsmap**2).flatten()
peak_fraction = vals.max()/sum(vals)
except Exception as msg:
print ('Plot of %s failed: %s' % (source.name, msg))
return None
if peak_fraction<0.8:
done = True
else:
#scale is too large: reduce it
tsize /=2.
print ('peak fraction= %0.2f: setting size to %.2f' % (peak_fraction, tsize))
ellipsex = moment_analysis(zea.image, wcs)
source.ellipsex= list(ellipsex) + [tsize, peak_fraction] # copy to the source object
print ('moment analysis ellipse:', np.array(ellipsex))
rax, decx, ax,bx,phi = ellipsex
tsp.overplot([rax,decx,ax,bx, phi], color='w', lw=2, ls='-', contours=[2.45])
tsp.plot(SkyDir(rax,decx), color='w', symbol='o' );
filename = source.name.replace(' ','_').replace('+','p')
fout = os.path.join(tsmap_dir, ('%s_tsmap.jpg'%filename) )
print ('saving updated tsplot with moment analysis ellipse to %s...' % fout)
sys.stdout.flush()
    plt.savefig(fout, bbox_inches='tight', pad_inches=0.2) # cuts off otherwise
return tsp
class Localization(object):
""" manage localization of a source
Implements a minimization interface
    see also the localize function, which uses the elliptical fitter
"""
defaults = (
('tolerance',1e-4),
('verbose',False),
('update',False,"Update the source position after localization"),
('max_iteration',15,"Number of iterations"),
#('bandfits',True,"Default use bandfits"),
('maxdist',1,"fail if try to move further than this"),
('seedpos', None, 'if set, start from this position instead of the source position'),
        ('factor', 1.0, 'factor to divide the likelihood for systematics'),
('quiet', False, 'set to suppress output'),
)
@keyword_options.decorate(defaults)
def __init__(self, tsm, **kwargs):
"""
tsm : a TSmap object, with a source selected
It defines a function that returns the TS, or 2x the likelihood ratio of a position with respect to the
source position
"""
keyword_options.process(self, kwargs)
self.tsm = tsm # roistat.tsmap_view(source_name)
self.maxlike = self.log_like()
self.skydir = self.tsm.skydir
if self.seedpos is not None:
if not isinstance(self.seedpos, SkyDir):
self.seedpos = SkyDir(*self.seedpos)
self.skydir = self.seedpos
self.name = self.tsm.source.name
if self.factor!=1.0:
print ('Applying factor {:.2f}'.format(self.factor))
def log_like(self, skydir=None):
""" return log likelihood at the given position"""
return self.tsm(skydir)/2
def TSmap(self, skydir):
""" return the TS at given position, or
2x the log(likelihood ratio) from the nominal position
"""
val= 2*(self.log_like(skydir)-self.maxlike)
return val / self.factor
# the following 3 functions are for a minimizer
def get_parameters(self):
return np.array([self.tsm.skydir.ra(), self.tsm.skydir.dec()])
def set_parameters(self, par):
self.skydir = SkyDir(par[0],par[1])
self.tsm.skydir = self.tsm.set_dir(self.skydir)
def __call__(self, par):
# for a minimizer
return -self.TSmap(SkyDir(par[0],par[1]))
def reset(self):
""" restore modifications to the source
"""
self.tsm.reset()
def dir(self):
return self.skydir
def errorCircle(self):
return 0.05 #initial guess
def spatialLikelihood(self, sd): #negative for legacy code below
return -self.log_like(sd)
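    def minimize_with_scipy(self):
        """Hedged sketch (an assumption, not part of the original class):
        drive the get_parameters/set_parameters/__call__ minimizer interface
        above with a generic scipy minimizer; the localize() method below
        uses the quadform elliptical fitter instead.
        """
        from scipy.optimize import minimize
        # self is callable: it returns -TS at a (ra, dec) parameter pair
        return minimize(self, self.get_parameters(), method='Nelder-Mead')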
def localize(self):
"""Localize a source using an elliptic approximation to the likelihood surface.
return fit position, number of iterations, distance moved, delta TS
"""
#roi = self.roi
#bandfits = self.bandfits
verbose = self.verbose
tolerance= self.tolerance
l = quadform.Localize(self,verbose = verbose)
ld = l.dir
ll0 = self.spatialLikelihood(self.skydir)
if not self.quiet:
fmt ='Localizing source %s, tolerance=%.1e...\n\t'+7*'%10s'
tup = (self.name, tolerance,)+tuple('moved delta ra dec a b qual'.split())
print (fmt % tup)
print (('\t'+4*'%10.4f')% (0,0,self.skydir.ra(), self.skydir.dec()))
diff = np.degrees(l.dir.difference(self.skydir))
print (('\t'+7*'%10.4f')% (diff,diff, l.par[0],l.par[1],l.par[3],l.par[4], l.par[6]))
old_sigma=1.0
        for i in range(self.max_iteration):
try:
l.fit(update=True)
except:
#raise
l.recenter()
if not self.quiet: print ('trying a recenter...')
continue
diff = np.degrees(l.dir.difference(ld))
delt = np.degrees(l.dir.difference(self.skydir))
sigma = l.par[3]
if not self.quiet: print (('\t'+7*'%10.4f')% (diff, delt, l.par[0],l.par[1],l.par[3],l.par[4], l.par[6]))
if delt>self.maxdist:
                l.par[6]=99 # flag very bad quality and reset position
l.sigma =1.0
l.par[0]=self.skydir.ra(); l.par[1]=self.skydir.dec()
if not self.quiet: print ('\t -attempt to move beyond maxdist=%.1f' % self.maxdist)
break
#self.tsm.source.ellipse = self.qform.par[0:2]+self.qform.par[3:7]
return False # hope this does not screw things up
#raise Exception('localize failure: -attempt to move beyond maxdist=%.1f' % self.maxdist)
if (diff < tolerance) and (abs(sigma-old_sigma) < tolerance):
break # converge
ld = l.dir
old_sigma=sigma
self.qform = l
self.lsigma = l.sigma
q = l.par
self.ellipse = dict(ra=float(q[0]), dec=float(q[1]),
a=float(q[3]), b=float(q[4]),
ang=float(q[5]), qual=float(q[6]),
lsigma = l.sigma)
ll1 = self.spatialLikelihood(l.dir)
if not self.quiet: print ('TS change: %.2f'%(2*(ll0 - ll1)))
#roi.delta_loc_logl = (ll0 - ll1)
# this is necessary in case the fit always fails.
delt = np.degrees(l.dir.difference(self.skydir))
self.delta_ts = 2*(ll0-ll1)
self.delt = delt
self.niter = i
# if successful, add a list representing the ellipse to the source
self.tsm.source.ellipse = self.qform.par[0:2]+self.qform.par[3:7] +[self.delta_ts]
return True #success
def summary(self):
if hasattr(self, 'niter') and self.niter>0:
print ('Localized %s: %d iterations, moved %.3f deg, deltaTS: %.1f' % \
(self.name, self.niter, self.delt, self.delta_ts))
labels = 'ra dec a b ang qual'.split()
print ((len(labels)*'%10s') % tuple(labels))
p = self.qform.par[0:2]+self.qform.par[3:7]
print (len(p)*'%10.4f' % tuple(p))
def localize_all(roi, ignore_exception=True, **kwargs):
""" localize all variable local sources in the roi, make TSmaps and associations if requested
ignore if extended -- has 'spatial_model'
kwargs can have prefix to select subset with name starting with the prefix, e.g. 'SEED'
"""
tsmin = kwargs.pop('tsmin',10)
prefix = kwargs.pop('prefix', None)
source_name = kwargs.pop('source_name', None)
update = kwargs.pop('update', False)
def filt(s):
ok = s.skydir is not None\
and isinstance(s, sources.PointSource) \
            and np.any(s.spectral_model.free)
"""
Script to load the first trained model (by running first_try_training.py) and view the quality of infilling
"""
from ai_ct_scans.model_trainers import InfillTrainer
import torch
import numpy as np
import matplotlib.pyplot as plt
from ai_ct_scans.data_loading import data_root_directory
if torch.cuda.is_available():
dev = torch.device("cuda")
else:
dev = "cpu"
save_dir = data_root_directory().parent / "infiller_debug"
trainer = InfillTrainer(
num_encoder_convs=6,
num_decoder_convs=6,
batch_size=3,
clear_previous_memmaps=False,
save_dir=save_dir,
save_freq=1000,
encoder_filts_per_layer=24,
decoder_filts_per_layer=24,
num_dense_layers=1,
neurons_per_dense=128,
learning_rate=0.00001,
blank_width=16,
kernel_size=7,
)
trainer.load_model(trainer.save_dir)
scan = trainer.multi_patient_loader.patients[0].abdo.scan_1.full_memmap
step = trainer.blank_width
window = 256
y_start = 100
x_start = 75
mid_layer = int(scan.shape[1] / 2)
im_stack = np.zeros([9, 1, window, window])
input_stack = np.zeros([9, 1, window, window])
labels = []
plane_one_hots = []
body_part_one_hots = []
coords_sets = []
input_mask = trainer.plane_masks[0]
label_mask = trainer.label_masks[0]
i = 0
for row_step in range(3):
for col_step in range(3):
y_offset = row_step * step
x_offset = col_step * step
        patch = scan[
            y_start + y_offset : y_start + y_offset + window,
            mid_layer,
            x_start + x_offset : x_start + x_offset + window,
        ]
        label = patch[label_mask].reshape([1, step, step])
        labels.append(label)
        im_stack[i, 0, :, :] = patch
        input_stack[i, 0, :, :] = patch * input_mask
        plane_one_hots.append(np.array([0, 1, 0]))
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import sys
from astrometry.util.fits import *
from astrometry.util.plotutils import *
from astrometry.libkd.spherematch import match_radec
from tractor.sfd import SFDMap
from legacypipe.survey import *
def sample_in_radec_box(ralo, rahi, declo, dechi, N,
nbatch=1000):
'''
Draw N samples uniformly within the given RA,Dec box, correctly
handling the change of scale of RA with respect to Dec.
'''
rr,dd = [],[]
ntotal = 0
while ntotal < N:
# "unit" values ru in [0, 1) that will be scaled to RA
ru = np.random.uniform(size=nbatch)
# Draw Dec values
d = np.random.uniform(low=declo, high=dechi, size=nbatch)
# Taper the accepted width in RA based on Dec; reject ones outside
# NOTE that we could make this more efficient (reject fewer) by
# scaling by the min/max cos(Dec) values.
cosd = np.cos(np.deg2rad(d))
I = np.flatnonzero(ru < cosd)
if len(I) == 0:
continue
# Scale "ru" to RAs
r = ralo + (rahi - ralo) * ru[I]/cosd[I]
d = d[I]
rr.append(r)
dd.append(d)
ntotal += len(r)
#print('Kept', len(r), 'of', nbatch)
ra = np.hstack(rr)[:N]
dec = np.hstack(dd)[:N]
return ra,dec
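# Hedged usage sketch (illustration only, not part of the original script):
# draw a few uniform points in a small box near the pole, where the cos(Dec)
# correction matters most.
def _sample_in_radec_box_demo(seed=42):
    np.random.seed(seed)
    ra, dec = sample_in_radec_box(10., 20., 80., 85., N=5)
    print('RA :', ra)
    print('Dec:', dec)
    return ra, dec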
def main():
ps = PlotSequence('shotgun')
survey = LegacySurveyData()
C = fits_table('survey-ccds-annotated.fits')
print(len(C), 'CCDs')
C.cut(C.photometric)
C.cut(C.blacklist_ok)
print(len(C), 'photometric and not blacklisted')
# HACK
print('FIXME not cutting on DECALS')
#C.cut(C.tilepass > 0)
#print(len(C), 'taken by DECaLS')
targets = dict(g=24.0, r=23.4, z=22.5)
def ivtomag(iv, nsigma=5.):
return -2.5 * (np.log10(nsigma / np.sqrt(iv)) - 9)
def band_index(band):
allbands = 'ugrizY'
return allbands.index(band)
ccmap = dict(g='g', r='r', z='m')
ceil_exptime = dict(g=125., r=125., z=250.)
#plt.clf()
bands = 'grz'
for band in bands:
tmag = targets[band]
print()
print(band, 'band, target depth', tmag)
ccds = C[C.filter == band]
ccdarea = (2046*4094*(0.262/3600.)**2)
        print(len(ccds), 'CCDs, total exptime', np.sum(ccds.exptime))
import numpy as np
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem.Fingerprints import FingerprintMols
from DeepPurpose.pybiomed_helper import _GetPseudoAAC, CalculateAADipeptideComposition, \
calcPubChemFingerAll, CalculateConjointTriad, GetQuasiSequenceOrder
import torch
from torch.utils import data
from torch.autograd import Variable
try:
from descriptastorus.descriptors import rdDescriptors, rdNormalizedDescriptors
except:
    raise ImportError("Please install descriptastorus: pip install git+https://github.com/bp-kelley/descriptastorus")
from DeepPurpose.chemutils import get_mol, atom_features, bond_features, MAX_NB, ATOM_FDIM, BOND_FDIM
from subword_nmt.apply_bpe import BPE
import codecs
import pickle
import wget
from zipfile import ZipFile
import os
import sys
# ESPF encoding
vocab_path = './DeepPurpose/ESPF/drug_codes_chembl_freq_1500.txt'
bpe_codes_drug = codecs.open(vocab_path)
dbpe = BPE(bpe_codes_drug, merges=-1, separator='')
sub_csv = pd.read_csv('./DeepPurpose/ESPF/subword_units_map_chembl_freq_1500.csv')
idx2word_d = sub_csv['index'].values
words2idx_d = dict(zip(idx2word_d, range(0, len(idx2word_d))))
vocab_path = './DeepPurpose/ESPF/protein_codes_uniprot_2000.txt'
bpe_codes_protein = codecs.open(vocab_path)
pbpe = BPE(bpe_codes_protein, merges=-1, separator='')
#sub_csv = pd.read_csv(dataFolder + '/subword_units_map_protein.csv')
sub_csv = pd.read_csv('./DeepPurpose/ESPF/subword_units_map_uniprot_2000.csv')
idx2word_p = sub_csv['index'].values
words2idx_p = dict(zip(idx2word_p, range(0, len(idx2word_p))))
from DeepPurpose.chemutils import get_mol, atom_features, bond_features, MAX_NB
def create_var(tensor, requires_grad=None):
if requires_grad is None:
return Variable(tensor)
else:
return Variable(tensor, requires_grad=requires_grad)
def roc_curve(y_pred, y_label, figure_file, method_name):
'''
y_pred is a list of length n. (0,1)
y_label is a list of same length. 0/1
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py
'''
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
y_label = np.array(y_label)
y_pred = np.array(y_pred)
fpr = dict()
tpr = dict()
roc_auc = dict()
fpr[0], tpr[0], _ = roc_curve(y_label, y_pred)
roc_auc[0] = auc(fpr[0], tpr[0])
lw = 2
plt.plot(fpr[0], tpr[0],
lw=lw, label= method_name + ' (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
fontsize = 14
plt.xlabel('False Positive Rate', fontsize = fontsize)
plt.ylabel('True Positive Rate', fontsize = fontsize)
plt.title('Receiver Operating Characteristic Curve')
plt.legend(loc="lower right")
plt.savefig(figure_file)
return
def prauc_curve(y_pred, y_label, figure_file, method_name):
'''
y_pred is a list of length n. (0,1)
y_label is a list of same length. 0/1
reference:
https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-classification-in-python/
'''
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import auc
lr_precision, lr_recall, _ = precision_recall_curve(y_label, y_pred)
# plt.plot([0,1], [no_skill, no_skill], linestyle='--')
plt.plot(lr_recall, lr_precision, lw = 2, label= method_name + ' (area = %0.2f)' % average_precision_score(y_label, y_pred))
fontsize = 14
plt.xlabel('Recall', fontsize = fontsize)
plt.ylabel('Precision', fontsize = fontsize)
plt.title('Precision Recall Curve')
plt.legend()
plt.savefig(figure_file)
return
def length_func(list_or_tensor):
if type(list_or_tensor)==list:
return len(list_or_tensor)
return list_or_tensor.shape[0]
def index_select_ND(source, dim, index):
index_size = index.size()
suffix_dim = source.size()[1:]
final_size = index_size + suffix_dim
target = source.index_select(dim, index.view(-1))
return target.view(final_size)
def smiles2morgan(s, radius = 2, nBits = 1024):
try:
mol = Chem.MolFromSmiles(s)
features_vec = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=nBits)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(features_vec, features)
except:
        print('rdkit could not parse this SMILES for Morgan fingerprint: ' + s + '; using all-ones features')
features = np.ones((nBits, ))
return features
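# Hedged usage sketch: featurize a single SMILES string with the Morgan
# fingerprint above. Aspirin's SMILES is used purely as a familiar example
# input; any parseable SMILES works.
def _smiles2morgan_demo():
    fp = smiles2morgan('CC(=O)OC1=CC=CC=C1C(=O)O', radius=2, nBits=1024)
    print(fp.shape, int((fp > 0).sum()), 'nonzero slots')  # counts folded into 1024 slots
    return fp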
def smiles2rdkit2d(s):
try:
generator = rdNormalizedDescriptors.RDKit2DNormalized()
features = np.array(generator.process(s)[1:])
NaNs = np.isnan(features)
features[NaNs] = 0
except:
        print('descriptastorus could not process this SMILES: ' + s + '; using all-ones features')
features = np.ones((200, ))
return np.array(features)
def smiles2daylight(s):
try:
NumFinger = 2048
mol = Chem.MolFromSmiles(s)
bv = FingerprintMols.FingerprintMol(mol)
temp = tuple(bv.GetOnBits())
features = np.zeros((NumFinger, ))
features[np.array(temp)] = 1
except:
        print('rdkit could not parse this SMILES: ' + s + '; using all-ones features')
features = np.ones((2048, ))
return np.array(features)
def smiles2mpnnfeature(smiles):
## mpn.py::tensorize
'''
data-flow:
data_process(): apply(smiles2mpnnfeature)
DBTA: train(): data.DataLoader(data_process_loader())
mpnn_collate_func()
'''
try:
padding = torch.zeros(ATOM_FDIM + BOND_FDIM)
fatoms, fbonds = [], [padding]
in_bonds,all_bonds = [], [(-1,-1)]
mol = get_mol(smiles)
n_atoms = mol.GetNumAtoms()
for atom in mol.GetAtoms():
fatoms.append( atom_features(atom))
in_bonds.append([])
for bond in mol.GetBonds():
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
x = a1.GetIdx()
y = a2.GetIdx()
b = len(all_bonds)
all_bonds.append((x,y))
fbonds.append( torch.cat([fatoms[x], bond_features(bond)], 0) )
in_bonds[y].append(b)
b = len(all_bonds)
all_bonds.append((y,x))
fbonds.append( torch.cat([fatoms[y], bond_features(bond)], 0) )
in_bonds[x].append(b)
total_bonds = len(all_bonds)
fatoms = torch.stack(fatoms, 0)
fbonds = torch.stack(fbonds, 0)
agraph = torch.zeros(n_atoms,MAX_NB).long()
bgraph = torch.zeros(total_bonds,MAX_NB).long()
for a in range(n_atoms):
for i,b in enumerate(in_bonds[a]):
agraph[a,i] = b
for b1 in range(1, total_bonds):
x,y = all_bonds[b1]
for i,b2 in enumerate(in_bonds[x]):
if all_bonds[b2][0] != y:
bgraph[b1,i] = b2
except:
        print('Molecule could not be parsed; falling back to zero vectors')
fatoms = torch.zeros(0,39)
fbonds = torch.zeros(0,50)
agraph = torch.zeros(0,6)
bgraph = torch.zeros(0,6)
#fatoms, fbonds, agraph, bgraph = [], [], [], []
#print(fatoms.shape, fbonds.shape, agraph.shape, bgraph.shape)
Natom, Nbond = fatoms.shape[0], fbonds.shape[0]
shape_tensor = torch.Tensor([Natom, Nbond]).view(1,-1)
return [fatoms.float(), fbonds.float(), agraph.float(), bgraph.float(), shape_tensor.float()]
# random_fold
def create_fold(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
test = df.sample(frac = test_frac, replace = False, random_state = fold_seed)
train_val = df[~df.index.isin(test.index)]
val = train_val.sample(frac = val_frac/(1-test_frac), replace = False, random_state = 1)
train = train_val[~train_val.index.isin(val.index)]
return train, val, test
# cold protein
def create_fold_setting_cold_protein(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
gene_drop = df['Target Sequence'].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values
test = df[df['Target Sequence'].isin(gene_drop)]
train_val = df[~df['Target Sequence'].isin(gene_drop)]
gene_drop_val = train_val['Target Sequence'].drop_duplicates().sample(frac = val_frac/(1-test_frac),
replace = False,
random_state = fold_seed).values
val = train_val[train_val['Target Sequence'].isin(gene_drop_val)]
train = train_val[~train_val['Target Sequence'].isin(gene_drop_val)]
return train, val, test
# cold drug
def create_fold_setting_cold_drug(df, fold_seed, frac):
train_frac, val_frac, test_frac = frac
drug_drop = df['SMILES'].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values
test = df[df['SMILES'].isin(drug_drop)]
train_val = df[~df['SMILES'].isin(drug_drop)]
drug_drop_val = train_val['SMILES'].drop_duplicates().sample(frac = val_frac/(1-test_frac),
replace = False,
random_state = fold_seed).values
val = train_val[train_val['SMILES'].isin(drug_drop_val)]
train = train_val[~train_val['SMILES'].isin(drug_drop_val)]
return train, val, test
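# Hedged usage sketch: apply the cold-drug split above to a toy table and
# check that no SMILES string leaks from train into val or test. The toy
# molecules below are placeholders, not real binding data.
def _cold_drug_split_demo():
    smiles = ['C' * k for k in range(1, 11)]  # 10 unique toy molecules
    df = pd.DataFrame({'SMILES': smiles * 2,
                       'Target Sequence': ['MKT'] * 20,
                       'Label': [0, 1] * 10})
    train, val, test = create_fold_setting_cold_drug(df, fold_seed=1, frac=[0.7, 0.1, 0.2])
    assert not set(train['SMILES']) & (set(val['SMILES']) | set(test['SMILES']))
    return train, val, test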
def encode_drug(df_data, drug_encoding, column_name = 'SMILES', save_column_name = 'drug_encoding'):
print('encoding drug...')
print('unique drugs: ' + str(len(df_data[column_name].unique())))
if drug_encoding == 'Morgan':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2morgan)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'Pubchem':
unique = pd.Series(df_data[column_name].unique()).apply(calcPubChemFingerAll)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'Daylight':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2daylight)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'rdkit_2d_normalized':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2rdkit2d)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'CNN':
unique = pd.Series(df_data[column_name].unique()).apply(trans_drug)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
# the embedding is large and not scalable but quick, so we move to encode in dataloader batch.
elif drug_encoding == 'CNN_RNN':
unique = pd.Series(df_data[column_name].unique()).apply(trans_drug)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'Transformer':
unique = pd.Series(df_data[column_name].unique()).apply(drug2emb_encoder)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
elif drug_encoding == 'MPNN':
unique = pd.Series(df_data[column_name].unique()).apply(smiles2mpnnfeature)
unique_dict = dict(zip(df_data[column_name].unique(), unique))
df_data[save_column_name] = [unique_dict[i] for i in df_data[column_name]]
else:
raise AttributeError("Please use the correct drug encoding available!")
return df_data
def encode_protein(df_data, target_encoding, column_name = 'Target Sequence', save_column_name = 'target_encoding'):
print('encoding protein...')
print('unique target sequence: ' + str(len(df_data[column_name].unique())))
if target_encoding == 'AAC':
        print('-- Encoding AAC takes time. Time reference: ~24s for 100 sequences on a CPU. '
              'Estimate the runtime from the number of unique target sequences, not the size of the entire dataset.')
AA = pd.Series(df_data[column_name].unique()).apply(CalculateAADipeptideComposition)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'PseudoAAC':
        print('-- Encoding PseudoAAC takes time. Time reference: ~462s for 100 sequences on a CPU. '
              'Estimate the runtime from the number of unique target sequences, not the size of the entire dataset.')
AA = pd.Series(df_data[column_name].unique()).apply(_GetPseudoAAC)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'Conjoint_triad':
AA = pd.Series(df_data[column_name].unique()).apply(CalculateConjointTriad)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'Quasi-seq':
AA = pd.Series(df_data[column_name].unique()).apply(GetQuasiSequenceOrder)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'CNN':
AA = pd.Series(df_data[column_name].unique()).apply(trans_protein)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
# the embedding is large and not scalable but quick, so we move to encode in dataloader batch.
elif target_encoding == 'CNN_RNN':
AA = pd.Series(df_data[column_name].unique()).apply(trans_protein)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
elif target_encoding == 'Transformer':
AA = pd.Series(df_data[column_name].unique()).apply(protein2emb_encoder)
AA_dict = dict(zip(df_data[column_name].unique(), AA))
df_data[save_column_name] = [AA_dict[i] for i in df_data[column_name]]
else:
raise AttributeError("Please use the correct protein encoding available!")
return df_data
def data_process(X_drug = None, X_target = None, y=None, drug_encoding=None, target_encoding=None,
split_method = 'random', frac = [0.7, 0.1, 0.2], random_seed = 1, sample_frac = 1, mode = 'DTI', X_drug_ = None, X_target_ = None):
#property_prediction_flag = X_target is None
property_prediction_flag, function_prediction_flag, DDI_flag, PPI_flag, DTI_flag = False, False, False, False, False
if (X_target is None) and (X_drug is not None) and (X_drug_ is None):
property_prediction_flag = True
elif (X_target is not None) and (X_drug is None) and (X_target_ is None):
function_prediction_flag = True
elif (X_drug is not None) and (X_drug_ is not None):
DDI_flag = True
if (X_drug is None) or (X_drug_ is None):
raise AttributeError("Drug pair sequence should be in X_drug, X_drug_")
elif (X_target is not None) and (X_target_ is not None):
PPI_flag = True
if (X_target is None) or (X_target_ is None):
raise AttributeError("Target pair sequence should be in X_target, X_target_")
elif (X_drug is not None) and (X_target is not None):
DTI_flag = True
if (X_drug is None) or (X_target is None):
raise AttributeError("Target pair sequence should be in X_target, X_drug")
else:
raise AttributeError("Please use the correct mode. Currently, we support DTI, DDI, PPI, Drug Property Prediction and Protein Function Prediction...")
if split_method == 'repurposing_VS':
        y = [-1]*len(X_drug) # create temp y for compatibility
if DTI_flag:
print('Drug Target Interaction Prediction Mode...')
if isinstance(X_target, str):
X_target = [X_target]
if len(X_target) == 1:
# one target high throughput screening setting
X_target = np.tile(X_target, (length_func(X_drug), ))
df_data = pd.DataFrame(zip(X_drug, X_target, y))
df_data.rename(columns={0:'SMILES',
1: 'Target Sequence',
2: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' drug-target pairs')
elif property_prediction_flag:
print('Drug Property Prediction Mode...')
df_data = pd.DataFrame(zip(X_drug, y))
df_data.rename(columns={0:'SMILES',
1: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' drugs')
elif function_prediction_flag:
print('Protein Function Prediction Mode...')
df_data = pd.DataFrame(zip(X_target, y))
df_data.rename(columns={0:'Target Sequence',
1: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' proteins')
elif PPI_flag:
print('Protein Protein Interaction Prediction Mode...')
df_data = pd.DataFrame(zip(X_target, X_target_, y))
df_data.rename(columns={0: 'Target Sequence 1',
1: 'Target Sequence 2',
2: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' protein-protein pairs')
elif DDI_flag:
print('Drug Drug Interaction Prediction Mode...')
df_data = pd.DataFrame(zip(X_drug, X_drug_, y))
df_data.rename(columns={0: 'SMILES 1',
1: 'SMILES 2',
2: 'Label'},
inplace=True)
print('in total: ' + str(len(df_data)) + ' drug-drug pairs')
if sample_frac != 1:
df_data = df_data.sample(frac = sample_frac).reset_index(drop = True)
print('after subsample: ' + str(len(df_data)) + ' data points...')
if DTI_flag:
df_data = encode_drug(df_data, drug_encoding)
df_data = encode_protein(df_data, target_encoding)
elif DDI_flag:
df_data = encode_drug(df_data, drug_encoding, 'SMILES 1', 'drug_encoding_1')
df_data = encode_drug(df_data, drug_encoding, 'SMILES 2', 'drug_encoding_2')
elif PPI_flag:
df_data = encode_protein(df_data, target_encoding, 'Target Sequence 1', 'target_encoding_1')
df_data = encode_protein(df_data, target_encoding, 'Target Sequence 2', 'target_encoding_2')
elif property_prediction_flag:
df_data = encode_drug(df_data, drug_encoding)
elif function_prediction_flag:
df_data = encode_protein(df_data, target_encoding)
# dti split
if DTI_flag:
if split_method == 'repurposing_VS':
pass
else:
print('splitting dataset...')
if split_method == 'random':
train, val, test = create_fold(df_data, random_seed, frac)
elif split_method == 'cold_drug':
train, val, test = create_fold_setting_cold_drug(df_data, random_seed, frac)
elif split_method == 'HTS':
train, val, test = create_fold_setting_cold_drug(df_data, random_seed, frac)
val = pd.concat([val[val.Label == 1].drop_duplicates(subset = 'SMILES'), val[val.Label == 0]])
test = pd.concat([test[test.Label == 1].drop_duplicates(subset = 'SMILES'), test[test.Label == 0]])
elif split_method == 'cold_protein':
train, val, test = create_fold_setting_cold_protein(df_data, random_seed, frac)
elif split_method == 'repurposing_VS':
train = df_data
val = df_data
test = df_data
elif split_method == 'no_split':
print('do not do train/test split on the data for already splitted data')
return df_data.reset_index(drop=True)
else:
raise AttributeError("Please select one of the three split method: random, cold_drug, cold_target!")
elif DDI_flag:
if split_method == 'random':
train, val, test = create_fold(df_data, random_seed, frac)
elif split_method == 'no_split':
return df_data.reset_index(drop=True)
elif PPI_flag:
if split_method == 'random':
train, val, test = create_fold(df_data, random_seed, frac)
elif split_method == 'no_split':
return df_data.reset_index(drop=True)
elif function_prediction_flag:
if split_method == 'random':
train, val, test = create_fold(df_data, random_seed, frac)
elif split_method == 'no_split':
return df_data.reset_index(drop=True)
elif property_prediction_flag:
# drug property predictions
if split_method == 'repurposing_VS':
train = df_data
val = df_data
test = df_data
elif split_method == 'no_split':
print('do not do train/test split on the data for already splitted data')
return df_data.reset_index(drop=True)
else:
train, val, test = create_fold(df_data, random_seed, frac)
print('Done.')
return train.reset_index(drop=True), val.reset_index(drop=True), test.reset_index(drop=True)
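# Hedged usage sketch for data_process in DTI mode. The SMILES strings and
# protein sequence below are toy placeholders, not real affinity data;
# 'Morgan' and 'AAC' are just two of the encodings defined above.
def _data_process_demo():
    X_drug = ['CCO', 'CCN', 'CCC', 'CCCl', 'CCBr'] * 4
    X_target = ['MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ'] * 20
    y = [0., 1.] * 10
    return data_process(X_drug, X_target, y,
                        drug_encoding='Morgan', target_encoding='AAC',
                        split_method='random', frac=[0.7, 0.1, 0.2],
                        random_seed=1)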
def data_process_repurpose_virtual_screening(X_repurpose, target, drug_encoding, target_encoding, mode):
if mode == 'repurposing':
target = np.tile(target, (len(X_repurpose), ))
elif mode == 'virtual screening':
target = target
else:
raise AttributeError("Please select repurposing or virtual screening!")
df, _, _ = data_process(X_repurpose, target, drug_encoding = drug_encoding,
target_encoding = target_encoding,
split_method='repurposing_VS')
return df
class data_process_loader(data.Dataset):
def __init__(self, list_IDs, labels, df, **config):
'Initialization'
self.labels = labels
self.list_IDs = list_IDs
self.df = df
self.config = config
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
index = self.list_IDs[index]
v_d = self.df.iloc[index]['drug_encoding']
if self.config['drug_encoding'] == 'CNN' or self.config['drug_encoding'] == 'CNN_RNN':
v_d = drug_2_embed(v_d)
v_p = self.df.iloc[index]['target_encoding']
if self.config['target_encoding'] == 'CNN' or self.config['target_encoding'] == 'CNN_RNN':
v_p = protein_2_embed(v_p)
y = self.labels[index]
return v_d, v_p, y
class data_process_DDI_loader(data.Dataset):
def __init__(self, list_IDs, labels, df, **config):
'Initialization'
self.labels = labels
self.list_IDs = list_IDs
self.df = df
self.config = config
print(df.columns.values)
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
index = self.list_IDs[index]
v_d = self.df.iloc[index]['drug_encoding_1']
if self.config['drug_encoding'] == 'CNN' or self.config['drug_encoding'] == 'CNN_RNN':
v_d = drug_2_embed(v_d)
v_p = self.df.iloc[index]['drug_encoding_2']
if self.config['drug_encoding'] == 'CNN' or self.config['drug_encoding'] == 'CNN_RNN':
v_p = drug_2_embed(v_p)
y = self.labels[index]
return v_d, v_p, y
class data_process_PPI_loader(data.Dataset):
def __init__(self, list_IDs, labels, df, **config):
'Initialization'
self.labels = labels
self.list_IDs = list_IDs
self.df = df
self.config = config
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
index = self.list_IDs[index]
v_d = self.df.iloc[index]['target_encoding_1']
if self.config['target_encoding'] == 'CNN' or self.config['target_encoding'] == 'CNN_RNN':
v_d = protein_2_embed(v_d)
v_p = self.df.iloc[index]['target_encoding_2']
if self.config['target_encoding'] == 'CNN' or self.config['target_encoding'] == 'CNN_RNN':
v_p = protein_2_embed(v_p)
y = self.labels[index]
return v_d, v_p, y
class data_process_loader_Property_Prediction(data.Dataset):
def __init__(self, list_IDs, labels, df, **config):
'Initialization'
self.labels = labels
self.list_IDs = list_IDs
self.df = df
self.config = config
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
index = self.list_IDs[index]
v_d = self.df.iloc[index]['drug_encoding']
if self.config['drug_encoding'] == 'CNN' or self.config['drug_encoding'] == 'CNN_RNN':
v_d = drug_2_embed(v_d)
#print("len(v_d)", len(v_d))
y = self.labels[index]
return v_d, y
class data_process_loader_Protein_Prediction(data.Dataset):
def __init__(self, list_IDs, labels, df, **config):
'Initialization'
self.labels = labels
self.list_IDs = list_IDs
self.df = df
self.config = config
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
index = self.list_IDs[index]
v_p = self.df.iloc[index]['target_encoding']
if self.config['target_encoding'] == 'CNN' or self.config['target_encoding'] == 'CNN_RNN':
v_p = protein_2_embed(v_p)
#print("len(v_d)", len(v_d))
y = self.labels[index]
return v_p, y
def generate_config(drug_encoding = None, target_encoding = None,
result_folder = "./result/",
input_dim_drug = 1024,
input_dim_protein = 8420,
hidden_dim_drug = 256,
hidden_dim_protein = 256,
cls_hidden_dims = [1024, 1024, 512],
mlp_hidden_dims_drug = [1024, 256, 64],
mlp_hidden_dims_target = [1024, 256, 64],
batch_size = 256,
train_epoch = 10,
test_every_X_epoch = 20,
LR = 1e-4,
decay = 0,
transformer_emb_size_drug = 128,
transformer_intermediate_size_drug = 512,
transformer_num_attention_heads_drug = 8,
transformer_n_layer_drug = 8,
transformer_emb_size_target = 64,
transformer_intermediate_size_target = 256,
transformer_num_attention_heads_target = 4,
transformer_n_layer_target = 2,
transformer_dropout_rate = 0.1,
transformer_attention_probs_dropout = 0.1,
transformer_hidden_dropout_rate = 0.1,
mpnn_hidden_size = 50,
mpnn_depth = 3,
cnn_drug_filters = [32,64,96],
cnn_drug_kernels = [4,6,8],
cnn_target_filters = [32,64,96],
cnn_target_kernels = [4,8,12],
rnn_Use_GRU_LSTM_drug = 'GRU',
rnn_drug_hid_dim = 64,
rnn_drug_n_layers = 2,
rnn_drug_bidirectional = True,
rnn_Use_GRU_LSTM_target = 'GRU',
rnn_target_hid_dim = 64,
rnn_target_n_layers = 2,
rnn_target_bidirectional = True,
num_workers = 0
):
base_config = {'input_dim_drug': input_dim_drug,
'input_dim_protein': input_dim_protein,
'hidden_dim_drug': hidden_dim_drug, # hidden dim of drug
'hidden_dim_protein': hidden_dim_protein, # hidden dim of protein
'cls_hidden_dims' : cls_hidden_dims, # decoder classifier dim 1
'batch_size': batch_size,
'train_epoch': train_epoch,
'test_every_X_epoch': test_every_X_epoch,
'LR': LR,
'drug_encoding': drug_encoding,
'target_encoding': target_encoding,
'result_folder': result_folder,
'binary': False,
'num_workers': num_workers
}
if not os.path.exists(base_config['result_folder']):
os.makedirs(base_config['result_folder'])
if drug_encoding == 'Morgan':
base_config['mlp_hidden_dims_drug'] = mlp_hidden_dims_drug # MLP classifier dim 1
elif drug_encoding == 'Pubchem':
base_config['input_dim_drug'] = 881
base_config['mlp_hidden_dims_drug'] = mlp_hidden_dims_drug # MLP classifier dim 1
elif drug_encoding == 'Daylight':
base_config['input_dim_drug'] = 2048
base_config['mlp_hidden_dims_drug'] = mlp_hidden_dims_drug # MLP classifier dim 1
elif drug_encoding == 'rdkit_2d_normalized':
base_config['input_dim_drug'] = 200
base_config['mlp_hidden_dims_drug'] = mlp_hidden_dims_drug # MLP classifier dim 1
elif drug_encoding == 'CNN':
base_config['cnn_drug_filters'] = cnn_drug_filters
base_config['cnn_drug_kernels'] = cnn_drug_kernels
elif drug_encoding == 'CNN_RNN':
base_config['rnn_Use_GRU_LSTM_drug'] = rnn_Use_GRU_LSTM_drug
base_config['rnn_drug_hid_dim'] = rnn_drug_hid_dim
base_config['rnn_drug_n_layers'] = rnn_drug_n_layers
base_config['rnn_drug_bidirectional'] = rnn_drug_bidirectional
base_config['cnn_drug_filters'] = cnn_drug_filters
base_config['cnn_drug_kernels'] = cnn_drug_kernels
elif drug_encoding == 'Transformer':
base_config['input_dim_drug'] = 2586
base_config['transformer_emb_size_drug'] = transformer_emb_size_drug
base_config['transformer_num_attention_heads_drug'] = transformer_num_attention_heads_drug
base_config['transformer_intermediate_size_drug'] = transformer_intermediate_size_drug
base_config['transformer_n_layer_drug'] = transformer_n_layer_drug
base_config['transformer_dropout_rate'] = transformer_dropout_rate
base_config['transformer_attention_probs_dropout'] = transformer_attention_probs_dropout
base_config['transformer_hidden_dropout_rate'] = transformer_hidden_dropout_rate
base_config['hidden_dim_drug'] = transformer_emb_size_drug
elif drug_encoding == 'MPNN':
base_config['hidden_dim_drug'] = hidden_dim_drug
base_config['batch_size'] = batch_size
base_config['mpnn_hidden_size'] = mpnn_hidden_size
base_config['mpnn_depth'] = mpnn_depth
#raise NotImplementedError
elif drug_encoding is None:
pass
else:
raise AttributeError("Please use the correct drug encoding available!")
if target_encoding == 'AAC':
base_config['mlp_hidden_dims_target'] = mlp_hidden_dims_target # MLP classifier dim 1
elif target_encoding == 'PseudoAAC':
base_config['input_dim_protein'] = 30
base_config['mlp_hidden_dims_target'] = mlp_hidden_dims_target # MLP classifier dim 1
elif target_encoding == 'Conjoint_triad':
base_config['input_dim_protein'] = 343
base_config['mlp_hidden_dims_target'] = mlp_hidden_dims_target # MLP classifier dim 1
elif target_encoding == 'Quasi-seq':
base_config['input_dim_protein'] = 100
base_config['mlp_hidden_dims_target'] = mlp_hidden_dims_target # MLP classifier dim 1
elif target_encoding == 'CNN':
base_config['cnn_target_filters'] = cnn_target_filters
base_config['cnn_target_kernels'] = cnn_target_kernels
elif target_encoding == 'CNN_RNN':
base_config['rnn_Use_GRU_LSTM_target'] = rnn_Use_GRU_LSTM_target
base_config['rnn_target_hid_dim'] = rnn_target_hid_dim
base_config['rnn_target_n_layers'] = rnn_target_n_layers
base_config['rnn_target_bidirectional'] = rnn_target_bidirectional
base_config['cnn_target_filters'] = cnn_target_filters
base_config['cnn_target_kernels'] = cnn_target_kernels
elif target_encoding == 'Transformer':
base_config['input_dim_protein'] = 4114
base_config['transformer_emb_size_target'] = transformer_emb_size_target
base_config['transformer_num_attention_heads_target'] = transformer_num_attention_heads_target
base_config['transformer_intermediate_size_target'] = transformer_intermediate_size_target
base_config['transformer_n_layer_target'] = transformer_n_layer_target
base_config['transformer_dropout_rate'] = transformer_dropout_rate
base_config['transformer_attention_probs_dropout'] = transformer_attention_probs_dropout
base_config['transformer_hidden_dropout_rate'] = transformer_hidden_dropout_rate
base_config['hidden_dim_protein'] = transformer_emb_size_target
elif target_encoding is None:
pass
else:
raise AttributeError("Please use the correct protein encoding available!")
return base_config
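# Hedged usage sketch: build a config pairing an MPNN drug encoder with a
# CNN protein encoder, overriding a couple of the defaults defined above.
def _generate_config_demo():
    config = generate_config(drug_encoding='MPNN', target_encoding='CNN',
                             batch_size=128, train_epoch=5)
    print(config['mpnn_hidden_size'], config['cnn_target_filters'])
    return config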
def convert_y_unit(y, from_, to_):
# basis as nM
if from_ == 'nM':
y = y
elif from_ == 'p':
y = 10**(-y) / 1e-9
if to_ == 'p':
y = -np.log10(y*1e-9 + 1e-10)
elif to_ == 'nM':
y = y
return y
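# Hedged numeric check of the conversion above: 1 nM maps to
# -log10(1e-9 + 1e-10) ~ 8.96 on the 'p' scale; the +1e-10 guards log10(0),
# so the nM -> p -> nM round trip is only approximate.
def _convert_y_unit_demo():
    p = convert_y_unit(np.array([1.0, 10.0]), 'nM', 'p')
    back = convert_y_unit(p, 'p', 'nM')
    return p, back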
def protein2emb_encoder(x):
max_p = 545
t1 = pbpe.process_line(x).split() # split
try:
i1 = np.asarray([words2idx_p[i] for i in t1]) # index
except:
i1 = np.array([0])
l = len(i1)
if l < max_p:
i = np.pad(i1, (0, max_p - l), 'constant', constant_values = 0)
input_mask = ([1] * l) + ([0] * (max_p - l))
else:
i = i1[:max_p]
input_mask = [1] * max_p
return i, np.asarray(input_mask)
def drug2emb_encoder(x):
max_d = 50
t1 = dbpe.process_line(x).split() # split
try:
i1 = np.asarray([words2idx_d[i] for i in t1]) # index
except:
i1 = np.array([0])
l = len(i1)
if l < max_d:
i = np.pad(i1, (0, max_d - l), 'constant', constant_values = 0)
input_mask = ([1] * l) + ([0] * (max_d - l))
else:
i = i1[:max_d]
input_mask = [1] * max_d
return i, np.asarray(input_mask)
# the returned (indices, input_mask) tuple is fed into models.transformer.forward()
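# Hedged sketch of the pad-and-mask step shared by the two encoders above,
# isolated from the BPE vocabularies so it runs without the ESPF data files:
# token indices are right-padded with 0 up to max_len, and the mask marks
# real tokens with 1 and padding with 0. _pad_and_mask is a hypothetical
# helper name, not part of the original module.
def _pad_and_mask(indices, max_len):
    i1 = np.asarray(indices)
    l = len(i1)
    if l < max_len:
        i = np.pad(i1, (0, max_len - l), 'constant', constant_values=0)
        input_mask = [1] * l + [0] * (max_len - l)
    else:
        i = i1[:max_len]
        input_mask = [1] * max_len
    return i, np.asarray(input_mask)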
# '?' padding
amino_char = ['?', 'A', 'C', 'B', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M', 'L', 'O',
'N', 'Q', 'P', 'S', 'R', 'U', 'T', 'W', 'V', 'Y', 'X', 'Z']
smiles_char = ['?', '#', '%', ')', '(', '+', '-', '.', '1', '0', '3', '2', '5', '4',
'7', '6', '9', '8', '=', 'A', 'C', 'B', 'E', 'D', 'G', 'F', 'I',
'H', 'K', 'M', 'L', 'O', 'N', 'P', 'S', 'R', 'U', 'T', 'W', 'V',
'Y', '[', 'Z', ']', '_', 'a', 'c', 'b', 'e', 'd', 'g', 'f', 'i',
'h', 'm', 'l', 'o', 'n', 's', 'r', 'u', 't', 'y']
from sklearn.preprocessing import OneHotEncoder
enc_protein = OneHotEncoder().fit(np.array(amino_char).reshape(-1, 1))
enc_drug = OneHotEncoder().fit(np.array(smiles_char).reshape(-1, 1))
MAX_SEQ_PROTEIN = 1000
MAX_SEQ_DRUG = 100
def trans_protein(x):
temp = list(x.upper())
temp = [i if i in amino_char else '?' for i in temp]
if len(temp) < MAX_SEQ_PROTEIN:
temp = temp + ['?'] * (MAX_SEQ_PROTEIN-len(temp))
else:
        temp = temp[:MAX_SEQ_PROTEIN]
return temp
def protein_2_embed(x):
    return enc_protein.transform(np.array(x).reshape(-1, 1)).toarray().T
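# The drug-side analog referenced by the data loaders above is missing from
# this snippet; the symmetric definition below is an assumption that mirrors
# protein_2_embed with the drug one-hot encoder.
def drug_2_embed(x):
    return enc_drug.transform(np.array(x).reshape(-1, 1)).toarray().T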
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compute the Lyman-alpha S/N for the mean QSO at different g-mag, z,
as a function of wavelength.
It is the python version of the IDL code desi_quicklya.pro by <NAME> (LBL),
and it is heavily based on quicksim.py by <NAME> (UC Irvine)
Use desi_quicklya.py --help for instructions on running this program.
The DESIMODEL environment variable should be set to the root of your
desimodel package and PYTHONPATH should include $DESIMODEL/py/, e.g.
export DESIMODEL=`pwd`
export PYTHONPATH=$DESIMODEL/py/:$PYTHONPATH
Created 06-May-2015 by <NAME> (<EMAIL>)
Modified Jan-2018 by <NAME>
"""
import datetime,time
import argparse
import os,sys
import os.path
import numpy as np
import astropy.units as units
#import desimodel.simulate as sim
from desiutil.log import get_logger
from desispec.io.filters import load_filter
import desisim.simexp
import desisim.obs
import desisim.io
import desisim.util
import desitarget
import desispec.io
import desimodel.io
def sim_spectra(wave, flux, program, obsconditions=None,
sourcetype=None, expid=0, seed=0, specsim_config_file="desi"):
"""
Simulate spectra from an input set of wavelength and flux and writes a FITS file in the Spectra format that can
be used as input to the redshift fitter.
Args:
wave : 1D np.array of wavelength in Angstrom (in vacuum) in observer frame (i.e. redshifted)
flux : 1D or 2D np.array. 1D array must have same size as wave, 2D array must have shape[1]=wave.size
flux has to be in units of 10^-17 ergs/s/cm2/A
program : dark, lrg, qso, gray, grey, elg, bright, mws, bgs
ignored if obsconditions is not None
Optional:
        obsconditions : dictionary of observation conditions with SEEING EXPTIME AIRMASS MOONFRAC MOONALT MOONSEP
        sourcetype : list of string, allowed values are (sky,elg,lrg,qso,bgs,star), type of sources, used for fiber aperture loss, default is star
expid : this expid number will be saved in the Spectra fibermap
seed : random seed
skyerr : fractional sky subtraction error
"""
log = get_logger()
if len(flux.shape)==1 :
flux=flux.reshape((1,flux.size))
nspec=flux.shape[0]
log.info("Starting simulation of {} spectra".format(nspec))
if sourcetype is None :
sourcetype = np.array(["star" for i in range(nspec)])
log.debug("sourcetype = {}".format(sourcetype))
tileid = 0
telera = 0
teledec = 0
dateobs = time.gmtime()
night = desisim.obs.get_night(utc=dateobs)
program = program.lower()
frame_fibermap = desispec.io.fibermap.empty_fibermap(nspec)
frame_fibermap.meta["FLAVOR"]="custom"
frame_fibermap.meta["NIGHT"]=night
frame_fibermap.meta["EXPID"]=expid
# add DESI_TARGET
tm = desitarget.targetmask.desi_mask
frame_fibermap['DESI_TARGET'][sourcetype=="lrg"]=tm.LRG
frame_fibermap['DESI_TARGET'][sourcetype=="elg"]=tm.ELG
frame_fibermap['DESI_TARGET'][sourcetype=="qso"]=tm.QSO
frame_fibermap['DESI_TARGET'][sourcetype=="sky"]=tm.SKY
frame_fibermap['DESI_TARGET'][sourcetype=="bgs"]=tm.BGS_ANY
# add dummy TARGETID
frame_fibermap['TARGETID']=np.arange(nspec).astype(int)
# spectra fibermap has two extra fields : night and expid
# This would be cleaner if desispec would provide the spectra equivalent
# of desispec.io.empty_fibermap()
spectra_fibermap = desispec.io.empty_fibermap(nspec)
spectra_fibermap = desispec.io.util.add_columns(spectra_fibermap,
['NIGHT', 'EXPID', 'TILEID'],
[np.int32(night), np.int32(expid), np.int32(tileid)],
)
for s in range(nspec):
for tp in frame_fibermap.dtype.fields:
spectra_fibermap[s][tp] = frame_fibermap[s][tp]
if obsconditions is None:
if program in ['dark', 'lrg', 'qso']:
obsconditions = desisim.simexp.reference_conditions['DARK']
elif program in ['elg', 'gray', 'grey']:
obsconditions = desisim.simexp.reference_conditions['GRAY']
elif program in ['mws', 'bgs', 'bright']:
obsconditions = desisim.simexp.reference_conditions['BRIGHT']
else:
raise ValueError('unknown program {}'.format(program))
elif isinstance(obsconditions, str):
try:
obsconditions = desisim.simexp.reference_conditions[obsconditions.upper()]
except KeyError:
raise ValueError('obsconditions {} not in {}'.format(
obsconditions.upper(),
list(desisim.simexp.reference_conditions.keys())))
try:
params = desimodel.io.load_desiparams()
wavemin = params['ccd']['b']['wavemin']
wavemax = params['ccd']['z']['wavemax']
except KeyError:
wavemin = desimodel.io.load_throughput('b').wavemin
wavemax = desimodel.io.load_throughput('z').wavemax
if wave[0] > wavemin:
log.warning('Minimum input wavelength {}>{}; padding with zeros'.format(
wave[0], wavemin))
dwave = wave[1] - wave[0]
npad = int((wave[0] - wavemin)/dwave + 1)
wavepad = np.arange(npad) * dwave
wavepad += wave[0] - dwave - wavepad[-1]
fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
wave = np.concatenate([wavepad, wave])
flux = np.hstack([fluxpad, flux])
assert flux.shape[1] == len(wave)
assert np.allclose(dwave, np.diff(wave))
assert wave[0] <= wavemin
if wave[-1] < wavemax:
log.warning('Maximum input wavelength {}<{}; padding with zeros'.format(
wave[-1], wavemax))
dwave = wave[-1] - wave[-2]
npad = int( (wavemax - wave[-1])/dwave + 1 )
wavepad = wave[-1] + dwave + np.arange(npad)*dwave
fluxpad = np.zeros((flux.shape[0], len(wavepad)), dtype=flux.dtype)
wave = np.concatenate([wave, wavepad])
flux = np.hstack([flux, fluxpad])
assert flux.shape[1] == len(wave)
assert np.allclose(dwave, np.diff(wave))
assert wavemax <= wave[-1]
ii = (wavemin <= wave) & (wave <= wavemax)
flux_unit = 1e-17 * units.erg / (units.Angstrom * units.s * units.cm ** 2 )
wave = wave[ii]*units.Angstrom
flux = flux[:,ii]*flux_unit
nspec = flux.shape[0]
sim = desisim.simexp.simulate_spectra(wave, flux, fibermap=frame_fibermap,
obsconditions=obsconditions, seed=seed,specsim_config_file = specsim_config_file)
# full wave array
wmin=1e12
wmax=0.
dwave=0.
for table in sim.camera_output :
twave = table['wavelength'].astype(float)
wmin = min(wmin,np.min(twave))
wmax = max(wmax,np.max(twave))
if dwave==0 :
dwave=twave[1]-twave[0]
else :
assert(np.abs(dwave-(twave[1]-twave[0]))<0.0001)
wave=np.linspace(wmin,wmax,int((wmax-wmin)/dwave)+1)
log.debug("wmin wmax dwave wave= {} {} {} {}".format(wmin,wmax,dwave,wave))
sivarflux = np.zeros((nspec,wave.size))
sivar = np.zeros((nspec,wave.size))
# total signal on all cameras
for table in sim.camera_output :
twave = table['wavelength'].astype(float)
tivar = table['flux_inverse_variance'].T.astype(float)
tivarflux = table['flux_inverse_variance'].T.astype(float)*table['observed_flux'].T.astype(float)
for s in range(nspec) :
sivar[s] += np.interp(wave,twave,tivar[s],left=0,right=0)
sivarflux[s] += np.interp(wave,twave,tivarflux[s],left=0,right=0)
scale=1e17
flux = np.zeros(sivar.shape)
for s in range(nspec) :
ii=(sivar[s]>0)
flux[s,ii] = sivarflux[s,ii]/sivar[s,ii] * scale
ivar = sivar / scale**2
return wave,flux,ivar
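# Hedged sketch of the per-wavelength coaddition rule used above: camera
# fluxes are combined with inverse-variance weights,
# flux = sum(ivar_i * flux_i) / sum(ivar_i), shown on a toy shared grid.
def _ivar_coadd_demo():
    flux = np.array([[1.0, 2.0, 3.0],
                     [1.2, 1.8, 3.1]])  # two toy "cameras"
    ivar = np.array([[4.0, 1.0, 1.0],
                     [1.0, 4.0, 2.0]])
    tot = ivar.sum(axis=0)
    coadd = (ivar * flux).sum(axis=0) / tot
    return coadd, tot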
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v','--verbose', action = 'store_true',
help = 'provide verbose output on progress')
parser.add_argument('--ab-magnitude', type = str, default = "g=23",
help = 'max magnitude to compute, e.g. g=22.0 or r=21.5')
parser.add_argument('--single-mag', action = 'store_true',
help = 'single magnitude')
parser.add_argument('--total-exptime', type = float, default = 4000,
help = 'overrides exposure time specified in the parameter file (secs)')
parser.add_argument('--nexp', type = int, default = 4,
help = 'number of exposures (affects total readnoise)')
parser.add_argument('--config', type = str, default = "desi",
help = 'path to specsim configuration file')
parser.add_argument('--prefix', type = str, default = "sn-spec-lya",
help = 'prefix for output S/N files')
header_string="# using {}".format(os.path.basename(sys.argv[0]))
for arg in sys.argv[1:] :
header_string += " "+arg
header_string+="\n"
if "USER" in os.environ :
header_string+="# run by {}\n".format(os.environ["USER"])
header_string+="# on {}".format(datetime.date.today())
print(header_string)
args = parser.parse_args()
log = get_logger()
obsconditions = desisim.simexp.reference_conditions['DARK']
obsconditions["EXPTIME"]=args.total_exptime/args.nexp
# We require that the DESIMODEL environment variable is set.
if 'DESIMODEL' not in os.environ:
raise RuntimeError('The environment variable DESIMODEL must be set.')
# Load the source spectrum to use.
infile=os.environ['DESIMODEL']+'/data/spectra/spec-lya.dat'
if not os.path.isfile(infile):
print('Could not find file: %s' % infile)
return -1
x=np.loadtxt(infile).T
wave = x[0]
flux = x[1:] # these are mean QSO spectra at different redshifts
    # this is the same grid as in the input file; it can't be changed here!
zqs = np.linspace(2.0, 4.75, 12)
assert(flux.shape[0] == zqs.size)
# figure out magnitude
try:
band = args.ab_magnitude[0]
abmag = float(args.ab_magnitude[2:])
assert band in 'ugriz' and args.ab_magnitude[1] == '='
except(AssertionError,ValueError):
print('Invalid ab-magnitude parameter. '
+'Valid syntax is, e.g. g=22.0 or r=21.5.')
return -1
if args.single_mag :
mags=[abmag]
else :
min_m=19.25
dm=0.5
        Nm = int(np.ceil((abmag - min_m) / dm))
        mags = np.linspace(min_m, min_m + Nm * dm, Nm + 1)
if args.verbose: print('mags', mags)
# compute magnitudes of QSO spectra in input file
# assuming flux is prop to ergs/s/cm2/A
# (norme does not matter because the spectra will be rescaled)
filter_response = load_filter("SDSS_"+band.upper())
fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
infile_mags = np.zeros(flux.shape[0])
for s in range(flux.shape[0]) :
infile_mags[s] = filter_response.get_ab_magnitude(flux[s]*fluxunits,wave)
print(s,infile_mags[s])
for mag in mags:
if args.verbose: print('generate file for %s = %f' % (band,mag) )
# scaling fluxes
scaled_flux = np.zeros(flux.shape)
for s in range(flux.shape[0]) :
scaled_flux[s] = 10**(-0.4*(mag-infile_mags[s])) * flux[s]
# simulating
sourcetype = np.array(["qso" for i in range(flux.shape[0])])
sim_wave,sim_flux,sim_ivar = sim_spectra(wave, scaled_flux, program="dark", obsconditions=obsconditions, sourcetype=sourcetype, specsim_config_file=args.config)
        sim_snr = np.sqrt(sim_ivar)
import numpy as np
import pp
from pp.components import bend_circular
from pp.components import taper as taper_factory
from pp.components import waveguide
from pp.config import TAPER_LENGTH, WG_EXPANDED_WIDTH
from pp.routing.manhattan import remove_flat_angles, round_corners
from pp.routing.utils import get_list_ports_angle
def _is_vertical(segment, tol=1e-5):
p0, p1 = segment
return abs(p0[0] - p1[0]) < tol
def _is_horizontal(segment, tol=1e-5):
p0, p1 = segment
return abs(p0[1] - p1[1]) < tol
def _segment_sign(s):
p0, p1 = s
if _is_vertical(s):
return np.sign(p1[1] - p0[1])
if _is_horizontal(s):
return np.sign(p1[0] - p0[0])
def get_ports_x_or_y_distances(list_ports, ref_point):
if not list_ports:
return []
angle = get_list_ports_angle(list_ports)
x0 = ref_point[0]
y0 = ref_point[1]
if angle in [0, 180]:
xys = [p.y - y0 for p in list_ports]
xs = [round(p.x, 5) for p in list_ports]
if len(set(xs)) > 1:
raise ValueError("List ports with angle 0/180 should all have the same x")
else:
xys = [p.x - x0 for p in list_ports]
ys = [round(p.y, 5) for p in list_ports]
if len(set(ys)) > 1:
raise ValueError("List ports with angle 90/270 should all have the same y")
return xys
def _distance(port1, port2):
if hasattr(port1, "x"):
x1, y1 = port1.x, port1.y
else:
x1, y1 = port1[0], port1[1]
if hasattr(port2, "x"):
x2, y2 = port2.x, port2.y
else:
x2, y2 = port2[0], port2[1]
dx = x1 - x2
dy = y1 - y2
return np.sqrt(dx ** 2 + dy ** 2)
def connect_bundle_waypoints(
start_ports,
end_ports,
way_points,
straight_factory=waveguide,
taper_factory=taper_factory,
bend_factory=bend_circular,
bend_radius=10.0,
auto_sort=True,
**kwargs
):
"""
Args:
start_ports: list of ports
end_ports: list of ports
way_points: list of points defining a route
"""
if len(end_ports) != len(start_ports):
        raise ValueError(
            "Number of start ports should match number of end ports. "
            "Got {} {}".format(len(start_ports), len(end_ports))
        )
for p in start_ports:
p.angle = int(p.angle) % 360
for p in end_ports:
p.angle = int(p.angle) % 360
start_angle = start_ports[0].orientation
end_angle = end_ports[0].orientation
# Sort the ports such that the bundle connect the correct corresponding ports.
angles_to_sorttypes = {
(0, 180): ("Y", "Y"),
(0, 90): ("Y", "X"),
(0, 0): ("Y", "-Y"),
(0, 270): ("Y", "-X"),
(90, 0): ("X", "Y"),
(90, 90): ("X", "-X"),
(90, 180): ("X", "-Y"),
(90, 270): ("X", "X"),
(180, 90): ("Y", "-X"),
(180, 0): ("Y", "Y"),
(180, 270): ("Y", "X"),
(180, 180): ("Y", "-Y"),
(270, 90): ("X", "X"),
(270, 270): ("X", "-X"),
(270, 0): ("X", "-Y"),
(270, 180): ("X", "Y"),
}
dict_sorts = {
"X": lambda p: p.x,
"Y": lambda p: p.y,
"-X": lambda p: -p.x,
"-Y": lambda p: -p.y,
}
key = (start_angle, end_angle)
sp_st, ep_st = angles_to_sorttypes[key]
start_port_sort = dict_sorts[sp_st]
end_port_sort = dict_sorts[ep_st]
if auto_sort:
start_ports.sort(key=start_port_sort)
end_ports.sort(key=end_port_sort)
routes = _generate_manhattan_bundle_waypoints(
start_ports, end_ports, way_points, **kwargs
)
bends90 = [bend_factory(radius=bend_radius, width=p.width) for p in start_ports]
if taper_factory:
if callable(taper_factory):
taper = taper_factory(
length=TAPER_LENGTH,
width1=start_ports[0].width,
width2=WG_EXPANDED_WIDTH,
layer=start_ports[0].layer,
)
else:
# In this case the taper is a fixed cell
taper = taper_factory
else:
taper = None
connections = [
round_corners(pts, bend90, straight_factory, taper=taper)
for pts, bend90 in zip(routes, bends90)
]
return connections
def snap_route_to_end_point_x(route, x):
y1, y2 = [p[1] for p in route[-2:]]
return route[:-2] + [(x, y1), (x, y2)]
def snap_route_to_end_point_y(route, y):
x1, x2 = [p[0] for p in route[-2:]]
return route[:-2] + [(x1, y), (x2, y)]
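# Example (hypothetical route): snap_route_to_end_point_x([(0, 0), (5, 0), (5, 8)], x=6)
# returns [(0, 0), (6, 0), (6, 8)] - only the last two points are moved onto x = 6.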
def _generate_manhattan_bundle_waypoints(
start_ports, end_ports, backbone_route, **kwargs
):
"""
Args:
start_ports: list of start ports. Should all be facing in the same direction
end_ports: list of end ports. Should all be facing in the same direction
route: going from one point somewhere within the start_ports bank to
another point within the end_ports bank
"""
backbone_route = remove_flat_angles(backbone_route)
way_segments = [(p0, p1) for p0, p1 in zip(backbone_route[:-1], backbone_route[1:])]
offsets_start = get_ports_x_or_y_distances(start_ports, backbone_route[0])
start_angle = start_ports[0].orientation
if start_angle in [90, 270]:
offsets_start = [-_d for _d in offsets_start]
end_angle = end_ports[0].orientation
def _displace_segment_copy(s, a, sh=1, sv=1):
sign_seg = _segment_sign(s)
if _is_horizontal(s):
dp = (0, sh * sign_seg * a)
elif _is_vertical(s):
dp = (sv * sign_seg * a, 0)
else:
raise ValueError("Segment should be manhattan, got {}".format(s))
displaced_seg = [np.array(p) + dp for p in s]
return displaced_seg
def _displace_segment_copy_group1(s, a):
return _displace_segment_copy(s, a, sh=1, sv=-1)
def _intersection(s1, s2):
if _is_horizontal(s1) and _is_vertical(s2):
sh, sv = s1, s2
elif _is_horizontal(s2) and _is_vertical(s1):
sh, sv = s2, s1
else:
if _is_horizontal(s1):
s1_dir = "h"
elif _is_vertical(s1):
s1_dir = "v"
else:
s1_dir = "u"
if _is_horizontal(s2):
s2_dir = "h"
elif _is_vertical(s2):
s2_dir = "v"
else:
s2_dir = "u"
            raise ValueError(
                "s1 / s2 should be h/v or v/h. Got {} {} {} {}".format(
                    s1_dir, s2_dir, s1, s2
                )
            )
return (sv[0][0], sh[0][1])
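    # Example (illustrative segments): _intersection(((0, 2), (5, 2)), ((3, 0), (3, 7)))
    # returns (3, 2) - the horizontal segment at y = 2 crossed with the vertical
    # segment at x = 3.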
N = len(start_ports)
routes = []
_make_segment = _displace_segment_copy_group1
for i in range(N):
prev_seg_sep = offsets_start[i]
route = []
for j, seg in enumerate(way_segments):
seg_sep = prev_seg_sep
d_seg = _make_segment(seg, seg_sep)
if j == 0:
start_point = d_seg[0]
else:
prev_seg = way_segments[j - 1]
tmp_seg = _make_segment(prev_seg, prev_seg_sep)
start_point = _intersection(d_seg, tmp_seg)
route += [start_point]
# If last point before the ports, adjust the separation to the end ports
if j == len(way_segments) - 1:
end_point = end_ports[i].position
route += [end_point]
if end_angle in [0, 180]:
route = snap_route_to_end_point_y(route, end_point[1])
else:
route = snap_route_to_end_point_x(route, end_point[0])
routes += [route]
return routes
@pp.cell
def test_connect_bundle_waypoints():
return test_connect_bundle_waypointsA()
@pp.cell
def test_connect_bundle_waypointsA():
import pp
from pp.component import Port
xs1 = np.arange(10) * 5 - 500.0
N = xs1.size
ys2 = np.array([0, 5, 10, 20, 25, 30, 40, 55, 60, 75]) + 500.0
ports1 = [Port("A_{}".format(i), (xs1[i], 0), 0.5, 90) for i in range(N)]
ports2 = [Port("B_{}".format(i), (0, ys2[i]), 0.5, 180) for i in range(N)]
top_cell = pp.Component()
p0 = ports1[0].position + (22, 0)
way_points = [
p0,
p0 + (0, 100),
p0 + (200, 100),
p0 + (200, -200),
p0 + (0, -200),
p0 + (0, -350),
p0 + (400, -350),
(p0[0] + 400, ports2[-1].y),
ports2[-1].position,
]
elements = connect_bundle_waypoints(ports1, ports2, way_points)
top_cell.add(elements)
return top_cell
@pp.cell
def test_connect_bundle_waypointsB():
import pp
from pp.component import Port
ys1 = np.array([0, 5, 10, 15, 30, 40, 50, 60]) + 0.0
ys2 = np.array([0, 10, 20, 30, 70, 90, 110, 120]) + 500.0
N = ys1.size
ports1 = [Port("A_{}".format(i), (0, ys1[i]), 0.5, 0) for i in range(N)]
ports2 = [Port("B_{}".format(i), (500, ys2[i]), 0.5, 180) for i in range(N)]
p0 = ports1[0].position + (0, 22.5)
top_cell = pp.Component()
way_points = [
p0,
p0 + (200, 0),
p0 + (200, -200),
p0 + (400, -200),
(p0[0] + 400, ports2[0].y),
ports2[0].position,
]
elements = connect_bundle_waypoints(ports1, ports2, way_points)
top_cell.add(elements)
return top_cell
@pp.cell
def test_connect_bundle_waypointsC():
import pp
from pp.component import Port
ys1 = np.array([0, 5, 10, 15, 20, 60, 70, 80, 120, 125])
ys2 = np.array([0, 5, 10, 20, 25, 30, 40, 55, 60, 65]) - 500.0
N = ys1.size
ports1 = [Port("A_{}".format(i), (0, ys1[i]), 0.5, 0) for i in range(N)]
ports2 = [Port("B_{}".format(i), (600, ys2[i]), 0.5, 180) for i in range(N)]
top_cell = pp.Component()
way_points = [
ports1[0].position,
ports1[0].position + (200, 0),
ports1[0].position + (200, -200),
ports1[0].position + (400, -200),
(ports1[0].x + 400, ports2[0].y),
ports2[0].position,
]
elements = connect_bundle_waypoints(ports1, ports2, way_points)
top_cell.add(elements)
return top_cell
@pp.cell
def test_connect_bundle_waypointsD():
import pp
from pp.component import Port
ys1 = np.array([0, 5, 10, 20, 25, 30, 40, 55, 60, 75]) + 100.0
    ys2 = np.array([0, -5, -10, -20, -25, -30, -40, -55, -60, -75])
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch
import torch.utils.data as data
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
import time, pickle, itertools
import sys
sys.path.insert(0, '/Users/chanaross/dev/Thesis/MachineLearning/')
from dataLoader_uber import DataSetCnn_LSTM
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
isServerRun = torch.cuda.is_available()
if isServerRun:
print ('Running using cuda')
# creating optimization parameters and function
# adam -(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,weight_decay=0)
# SGD -(params, lr=1e-3,momentum=0, dampening=0, weight_decay=0, nesterov=False)
# Adagrad -(params, lr=0.01, lr_decay=0, weight_decay=0)
def CreateOptimizer(netParams, ot, lr, dmp, mm, eps):
if ot == 1:
optim = torch.optim.SGD(netParams, lr, mm, dmp)
elif ot == 2:
optim = torch.optim.Adam(netParams, lr, (0.9, 0.999), eps)
elif ot == 3:
optim = torch.optim.Adagrad(netParams, lr)
return optim
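# Example call (illustrative values): plain SGD with momentum 0.9 and no dampening:
# optimizer = CreateOptimizer(net.parameters(), ot=1, lr=0.01, dmp=0, mm=0.9, eps=1e-08)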
class Model(nn.Module):
def __init__(self, cnn_input_size, class_size, hidden_size, batch_size, sequence_size, kernel_size,
stride_size, num_cnn_features, num_cnn_layers, fc_after_cnn_out_size):
super(Model, self).__init__()
self.sequence_size = sequence_size
self.hiddenSize = hidden_size
self.batch_size = batch_size
self.kernel_size = kernel_size
self.stride_size = stride_size
self.cnn_input_size = cnn_input_size
self.class_size = class_size
self.fc_output_size = fc_after_cnn_out_size
self.num_cnn_features = num_cnn_features
self.num_cnn_layers = num_cnn_layers
self.loss = None
self.lossCrit = None
self.optimizer = None
self.lr = None
self.maxEpochs = None
# output variables (loss, acc ect.)
self.finalAcc = 0
self.finalLoss = 0
self.lossVecTrain = []
self.lossVecTest = []
self.accVecTrain = []
self.accVecTest = []
self.cnn = nn.ModuleList()
self.fc_after_cnn = nn.ModuleList()
self.lstm = None
self.fc_after_lstm = None
        self.logSoftMax = nn.LogSoftmax(dim=1)  # applied to [batch, num_classes], so normalize over dim 1
def create_cnn(self):
padding_size = int(0.5*(self.kernel_size - 1))
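        # With stride 1, padding of (kernel_size - 1) / 2 keeps the spatial
        # dimensions unchanged ('same' padding for odd kernel sizes).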
# defines cnn network
layers = []
for i in range(self.num_cnn_layers):
if i == 0:
layers += [nn.Conv2d(self.cnn_input_size, self.num_cnn_features, kernel_size=self.kernel_size, stride=self.stride_size, padding=padding_size),
# nn.BatchNorm2d(self.num_cnn_features),
nn.ReLU(inplace=True)]
else:
layers += [nn.Conv2d(self.num_cnn_features, self.num_cnn_features, kernel_size=self.kernel_size, stride=self.stride_size, padding=padding_size),
# nn.BatchNorm2d(self.num_cnn_features),
nn.ReLU(inplace=True)]
return nn.Sequential(*layers)
def create_lstm(self, input_size):
layer = nn.LSTM(input_size, self.hiddenSize)
return layer
def create_fc_after_cnn(self, input_size, output_size):
layer = nn.Sequential(nn.Linear(input_size, output_size), nn.ReLU())
return layer
def create_fc_after_lstm(self, input_size, output_size):
layer = nn.Sequential(nn.Linear(input_size, output_size))
return layer
    def forward(self, x):
        # x is of size: [batch_size, sequence_size, mat_x, mat_y]
        batch_size = x.size(0)
        cnn_output = torch.zeros([batch_size, self.fc_output_size, self.sequence_size])
        for i in range(self.sequence_size):
            xtemp = x[:, i, :, :].view(batch_size, 1, x.size(2), x.size(3))
out = self.cnn[i](xtemp)
out = out.view((batch_size, -1))
out = self.fc_after_cnn[i](out) # after fully connected out is of size : [batch_size, fully_out_size]
cnn_output[:, :, i] = out
output, (h_n, c_n) = self.lstm(cnn_output.view(self.sequence_size, batch_size, -1))
out = self.fc_after_lstm(h_n)
out = self.logSoftMax(out.view(batch_size,-1)) # after last fc out is of size: [batch_size , num_classes] and is after LogSoftMax
return out
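    # Shape summary for forward (spatial window assumed square of side d):
    # x: [batch, sequence, d, d] -> per-step CNN + FC: [batch, fc_output_size]
    # -> LSTM over the sequence -> last hidden state -> FC + LogSoftmax:
    # [batch, class_size].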
def calcLoss(self, outputs, labels):
if self.loss is None:
self.loss = self.lossCrit(outputs, labels)
else:
self.loss += self.lossCrit(outputs, labels)
# creating backward propagation - calculating loss function result
def backward(self):
self.loss.backward(retain_graph=True)
# testing network on given test set
def test_spesific(self, testLoader):
# put model in evaluate mode
self.eval()
testCorr = 0.0
testTot = 0.0
localLossTest = []
localAccTest = []
for inputs, labels in testLoader:
inputVar = to_var(inputs)
labVar = to_var(labels)
# compute test result of model
x_size = inputVar.shape[2]
y_size = inputVar.shape[3]
            testOut = torch.zeros(inputVar.size(0), self.class_size, x_size, y_size)  # torch tensor so torch.max / NLLLoss work below
k = 0
for x in range(x_size):
for y in range(y_size): # calculate output for each grid_id
testOut[:, :, x, y] = self.forward(inputVar[:, :, :, :, k])
k += 1
            # find loss of test set (calcLoss accumulates into self.loss)
            self.loss = None
            self.calcLoss(testOut, labVar)
localLossTest.append(self.loss.item())
_, labTest = torch.max(testOut.data, 1)
if torch.cuda.is_available():
labTest = labTest.cpu()
testCorr = torch.sum(labTest == labels).detach().numpy() + testCorr
testTot = labels.size(0) * labels.size(1) * labels.size(2) + testTot
localAccTest.append(100 * testCorr / testTot)
accTest = np.average(localAccTest)
lossTest = np.average(localLossTest)
print("test accuarcy is: {0}".format(accTest))
return accTest, lossTest
# save network
def saveModel(self, path):
torch.save(self, path)
def main():
#####################
# Generate data
#####################
# data loader -
path = '/Users/chanaross/dev/Thesis/UberData/'
fileName = '3D_UpdatedGrid_5min_250Grid_LimitedEventsMat_allData.p'
dataInput = np.load(path + fileName)
xmin = 0
xmax = 20
ymin = 0
ymax = 20
dataInput = dataInput[xmin:xmax, ymin:ymax, :] # shrink matrix size for fast training in order to test model
# define important sizes for network -
x_size = dataInput.shape[0]
y_size = dataInput.shape[1]
dataSize = dataInput.shape[2]
classNum = (np.max(np.unique(dataInput)) + 1).astype(int)
testSize = 0.2
sequence_size = 5 # length of sequence for lstm network
cnn_input_size= 1 # size of matrix in input cnn layer - each sequence goes into different cnn network
cnn_dimention = 7 # size of matrix around point i for cnn network
batch_size = 300
num_epochs = 100
num_train = int((1 - testSize) * dataSize)
# define hyper parameters -
hidden_size = 64
kernel_size = 3
stride_size = 1
num_cnn_features = 64
num_cnn_layers = 3
fc_after_cnn_out_size = 64
# optimizer parameters -
lr = 0.01
ot = 1
dmp = 0
mm = 0.9
eps = 1e-08
# create network based on input parameter's -
my_net = Model(cnn_input_size, classNum, hidden_size, batch_size, sequence_size, kernel_size,
stride_size, num_cnn_features, num_cnn_layers, fc_after_cnn_out_size)
for i in range(sequence_size):
my_net.cnn.append(my_net.create_cnn())
my_net.fc_after_cnn.append(my_net.create_fc_after_cnn(num_cnn_features*cnn_dimention*cnn_dimention, fc_after_cnn_out_size))
my_net.lstm = my_net.create_lstm(fc_after_cnn_out_size)
my_net.fc_after_lstm = my_net.create_fc_after_lstm(my_net.hiddenSize, classNum)
# setup network
if isServerRun:
my_net = my_net.cuda()
numWeights = sum(param.numel() for param in my_net.parameters())
print('number of parameters: ', numWeights)
my_net.optimizer = CreateOptimizer(my_net.parameters(), ot, lr, dmp, mm, eps)
my_net.lossCrit = nn.NLLLoss()
my_net.maxEpochs = num_epochs
# load data from data loader and create train and test sets
data_train = dataInput[:, :, 0:num_train]
data_test = dataInput[:, :, num_train + 1:]
dataset_uber_train = DataSetCnn_LSTM(data_train, sequence_size, cnn_dimention)
dataset_uber_test = DataSetCnn_LSTM(data_test, sequence_size, cnn_dimention)
# creating data loader
dataloader_uber_train = data.DataLoader(dataset=dataset_uber_train, batch_size=batch_size, shuffle=True)
dataloader_uber_test = data.DataLoader(dataset=dataset_uber_test, batch_size=batch_size, shuffle=False)
for numEpoch in range(num_epochs):
my_net.loss = None
# for each epoch, calculate loss for each batch -
my_net.train()
localLoss = []
accTrain = []
trainCorr = 0.0
trainTot = 0.0
if (1+numEpoch)%20 == 0:
if my_net.optimizer.param_groups[0]['lr']>0.001:
my_net.optimizer.param_groups[0]['lr'] = my_net.optimizer.param_groups[0]['lr']/2
else:
my_net.optimizer.param_groups[0]['lr'] = 0.001
print('lr is: %.6f' % my_net.optimizer.param_groups[0]['lr'])
for i, (input, labels) in enumerate(dataloader_uber_train):
my_net.loss = None
# create torch variables
# input is of size [batch_size, seq_len, x_inputCnn, y_inputCnn, grid_id]
inputVar = to_var(input)
labVar = to_var(labels)
# reset gradient
my_net.optimizer.zero_grad()
# forward
k = 0
labTrain = torch.tensor([])
labTrain = labTrain.new_zeros(labVar.size())
for x in range(x_size):
for y in range(y_size): # calculate output for each grid_id
netOut = my_net.forward(inputVar[:, :, :, :, k])
_, labTrain[:, x, y] = torch.max(netOut.data, 1)
my_net.calcLoss(netOut, labVar[:, x, y])
k += 1
# backwards
my_net.backward()
# optimizer step
my_net.optimizer.step()
# local loss function list
localLoss.append(my_net.loss.item())
if isServerRun:
labTrain = labTrain.cpu()
trainCorr = torch.sum(labTrain.long() == labels).detach().numpy() + trainCorr
trainTot = labels.size(0) * labels.size(1) * labels.size(2) + trainTot
accTrain.append(100 * trainCorr / trainTot)
# output current state
if (i + 1) % 1 == 0:
print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f, Acc: %.4f'
% (numEpoch + 1, my_net.maxEpochs, i + 1,
dataloader_uber_train.dataset.data.shape[2] // dataloader_uber_train.batch_size,
my_net.loss.item(), accTrain[-1]))
            if accTrain[-1] >= np.max(np.array(accTrain)):  # save only when the current accuracy is the best seen so far
pickle.dump(my_net, open("gridSize"+str(xmax - xmin)+"_epoch"+str(numEpoch)+"_batch"+str(i)+ ".pkl", 'wb'))
my_net.saveModel("gridSize"+str(xmax - xmin)+"_epoch"+str(numEpoch)+"_batch"+str(i) + "_torch.pkl")
networkStr = "gridSize"+str(xmax - xmin)+"_epoch"+str(numEpoch)+"_batch"+str(i)
                # object array, since the test vectors may be shorter than the train vectors
                # (assumed intent: store test and train loss/accuracy side by side)
                outArray = np.array([my_net.lossVecTest, my_net.lossVecTrain,
                                     my_net.accVecTest, my_net.accVecTrain], dtype=object)
                np.save(networkStr, outArray)
        my_net.lossVecTrain.append(np.average(localLoss))
import os
import scipy.io
import numpy as np
from scripts.processes.CreateLonLat import CreateLonLat
from scripts.processes.PsEstGamma import PsEstGamma
from scripts.processes.PsFiles import PsFiles
from scripts.processes.PsSelect import PsSelect
from scripts.processes.PsWeed import PsWeed
from tests.MetaTestCase import MetaTestCase
class TestPsWeed(MetaTestCase):
_GEO_DATA_FILE_NAME = 'subset_8_of_S1A_IW_SLC__1SDV_20160614T043402_20160614T043429_011702_011EEA_F130_Stack_deb_ifg_Geo.dim'
# noinspection PyUnresolvedReferences
@classmethod
def setUpClass(cls):
super().setUpClass()
lonlat_process = CreateLonLat(cls._PATH, cls._GEO_DATA_FILE_NAME)
lonlat_process.load_results(cls._SAVE_LOAD_PATH)
cls.__ps_files = PsFiles(cls._PATH_PATCH_FOLDER, lonlat_process)
cls.__ps_files.load_results(cls._SAVE_LOAD_PATH)
cls.__est_gamma_process: PsEstGamma = None
# ps_est_gamma may be None because we load it from ps_select
cls.__ps_select = PsSelect(cls.__ps_files, cls.__est_gamma_process)
cls.__ps_select.load_results(cls._SAVE_LOAD_PATH)
cls.__ps_weed_process = None
def test_start_process_with_matlab_data(self):
def bool_to_int_array(bool_array: np.ndarray):
return np.where(bool_array == 1)[0]
self.__fill_est_gamma_with_matlab_data()
self.__start_process()
weed_mat = scipy.io.loadmat(os.path.join(self._PATCH_1_FOLDER, 'weed1.mat'))
np.testing.assert_array_almost_equal(np.where(self.__ps_weed_process.selectable_ps)[0],
bool_to_int_array(weed_mat['ix_weed']))
np.testing.assert_array_almost_equal(np.where(self.__ps_weed_process.selectable_ps2)[0],
bool_to_int_array(weed_mat['ix_weed2']))
        # Because the 'drop_noisy' result 'weighted_least_sqrt2' differs a bit from Snap,
        # those arrays are also different and need to be checked like this. But this
        # error does not propagate to selectable_ps and selectable_ps2
PS_RTOL = 0.28
PS_ATOL = 0.055
np.testing.assert_allclose(self.__ps_weed_process.ps_std, np.squeeze(weed_mat['ps_std']),
PS_RTOL, PS_ATOL)
np.testing.assert_allclose(self.__ps_weed_process.ps_max, np.squeeze(weed_mat['ps_max']),
PS_RTOL, PS_ATOL)
np.testing.assert_array_almost_equal(np.add(self.__ps_weed_process.ifg_ind, 1),
np.reshape(weed_mat['ifg_index'], len(self.__ps_weed_process.ifg_ind)))
def test_save_and_load_results(self):
self.__fill_est_gamma_with_matlab_data()
self.__start_process()
self.__ps_weed_process.save_results(self._SAVE_LOAD_PATH)
ps_weed_loaded = PsWeed(self._PATH, self.__ps_files, self.__est_gamma_process, self.__ps_select)
ps_weed_loaded.load_results(self._SAVE_LOAD_PATH)
np.testing.assert_array_equal(self.__ps_weed_process.selectable_ps, ps_weed_loaded.selectable_ps)
        np.testing.assert_array_equal(self.__ps_weed_process.selectable_ps2, ps_weed_loaded.selectable_ps2)
import unittest
from copy import deepcopy
from tensorly.decomposition import partial_tucker
from palmnet.core.layer_replacer_tucker import LayerReplacerTucker
from palmnet.data import Mnist
import numpy as np
from tensorly.tenalg.n_mode_product import multi_mode_dot
class TestLayerReplacerTucker(unittest.TestCase):
def setUp(self) -> None:
self.base_model = Mnist.load_model("cifar100_vgg19_2048x2048")
def test_simple(self):
model_transformer = LayerReplacerTucker(keep_last_layer=True)
new_model = model_transformer.fit_transform(deepcopy(self.base_model))
model_transformer = LayerReplacerTucker(rank_percentage_dense=0.5, keep_last_layer=True)
new_model = model_transformer.fit_transform(deepcopy(self.base_model))
def test_tucker_decomposition(self):
import tensorly
h, w, c, f = 3, 3, 64, 128
c_prim, f_prim = 16, 32
base_tensor = np.random.rand(h, w, c, f)
lst_fac = []
for k in [2, 3]:
mod_k_unfold = tensorly.base.unfold(base_tensor, k)
U, _, _ = np.linalg.svd(mod_k_unfold)
lst_fac.append(U)
# real_in_fac, real_out_fac = lst_fac[0][:, :c_prim], lst_fac[1][:, :f_prim]
real_in_fac, real_out_fac = lst_fac[0], lst_fac[1]
real_core = multi_mode_dot(base_tensor, [real_in_fac.T, real_out_fac.T], modes=(2,3))
del base_tensor # no need of it anymore
real_core = real_core[:,:,:c_prim,:f_prim]
real_in_fac = real_in_fac[:, :c_prim]
real_out_fac = real_out_fac[:, :f_prim]
base_tensor_low_rank = multi_mode_dot(real_core, [real_in_fac, real_out_fac], modes=(2,3))
in_rank, out_rank = LayerReplacerTucker.get_rank_layer(base_tensor_low_rank)
assert in_rank == c_prim and out_rank == f_prim, f"{in_rank}!={c_prim} or {out_rank} != {f_prim}" # in_rank=16, out_rank=32 -> it works!
decomposition = LayerReplacerTucker.get_tucker_decomposition(base_tensor_low_rank, in_rank, out_rank)
# core_tilde, (in_fac_tilde, out_fac_tilde) = partial_tucker(base_tensor, modes=(2, 3), ranks=(in_rank, out_rank), init='svd')
in_fac_tilde, core_tilde, out_fac_tilde = decomposition
base_tensor_tilde = multi_mode_dot(core_tilde, [in_fac_tilde, out_fac_tilde], modes=(2,3))
        assert np.allclose(base_tensor_tilde, base_tensor_low_rank)
"""
Tests for the research package's ProCoDA parsing functions
"""
import unittest
import aguaclara.research.procoda_parser as pp
from aguaclara.core.units import u
import pandas as pd
import numpy as np
import os
from matplotlib.testing.compare import compare_images
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
class TestProCoDAParser(unittest.TestCase):
def test_column_of_data(self):
        '''
        Extract other columns of data and append units.
        '''
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.column_of_data(path, 50, 1, units='mg/L')
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([ 21.61681747, 21.31163216, 20.80215263, 20.46752739,
20.1048584 , 19.7037487 , 19.4194355 , 18.95934677,
18.65832138, 18.24054337, 17.93864632, 17.591259 ,
17.25979805, 16.98148346, 16.60666656, 16.28514862,
15.99366856, 15.72474861, 15.35812187, 15.11634636,
14.75801468, 14.53341103, 14.20829868, 13.94124603,
13.69845104, 13.42016983, 13.17064667, 12.94155121,
12.66110611, 12.36821651, 12.1641016 , 11.91081715,
11.69137764, 11.46448898, 11.2214098 , 11.03143692,
10.78680801, 10.56936836, 10.36802101, 10.17097855,
9.95537758, 9.78312111, 9.55150509, 9.3843832 ,
9.21883678, 9.03395939, 8.85475636, 8.68857765,
8.47574997, 8.33256149, 8.13628197, 7.96697569,
7.80458403, 7.68562984, 7.4511261 , 7.34629679,
7.17365456, 7.03930044, 6.88661861, 6.73307562,
6.60730886, 6.45987988, 6.30656338, 6.18089199,
6.05378485, 5.90268421, 5.81327915, 5.68042564,
5.57657337, 5.40122986, 5.33153057, 5.19660377,
5.09033108, 4.96228552, 4.85437012, 4.76652002,
4.66415834, 4.54592991, 4.43500376, 4.34614754,
4.24292231, 4.16423607, 4.06328297, 3.96581864,
3.88231015, 3.7828486 , 3.74253488, 3.62953901,
3.53508115, 3.46755266, 3.36818004, 3.30672598,
3.22161722, 3.13899183, 3.08345532, 2.98398542,
2.94956589, 2.8504107 , 2.79215455, 2.72924852,
2.66635823, 2.60831141, 2.53093195, 2.47217631,
2.42190933, 2.36228228, 2.30094266, 2.24602866,
2.19216943, 2.14143515, 2.10641694, 2.07170939,
2.04412961, 2.0158174 , 2.00059986, 1.98546684,
1.97646523, 1.96455812, 1.95887971, 1.94987118])*u('mg/L'), 5).tolist()
)
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.column_of_data(path, 50, "red dye (mg/L)", units='mg/L')
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([ 21.61681747, 21.31163216, 20.80215263, 20.46752739,
20.1048584 , 19.7037487 , 19.4194355 , 18.95934677,
18.65832138, 18.24054337, 17.93864632, 17.591259 ,
17.25979805, 16.98148346, 16.60666656, 16.28514862,
15.99366856, 15.72474861, 15.35812187, 15.11634636,
14.75801468, 14.53341103, 14.20829868, 13.94124603,
13.69845104, 13.42016983, 13.17064667, 12.94155121,
12.66110611, 12.36821651, 12.1641016 , 11.91081715,
11.69137764, 11.46448898, 11.2214098 , 11.03143692,
10.78680801, 10.56936836, 10.36802101, 10.17097855,
9.95537758, 9.78312111, 9.55150509, 9.3843832 ,
9.21883678, 9.03395939, 8.85475636, 8.68857765,
8.47574997, 8.33256149, 8.13628197, 7.96697569,
7.80458403, 7.68562984, 7.4511261 , 7.34629679,
7.17365456, 7.03930044, 6.88661861, 6.73307562,
6.60730886, 6.45987988, 6.30656338, 6.18089199,
6.05378485, 5.90268421, 5.81327915, 5.68042564,
5.57657337, 5.40122986, 5.33153057, 5.19660377,
5.09033108, 4.96228552, 4.85437012, 4.76652002,
4.66415834, 4.54592991, 4.43500376, 4.34614754,
4.24292231, 4.16423607, 4.06328297, 3.96581864,
3.88231015, 3.7828486 , 3.74253488, 3.62953901,
3.53508115, 3.46755266, 3.36818004, 3.30672598,
3.22161722, 3.13899183, 3.08345532, 2.98398542,
2.94956589, 2.8504107 , 2.79215455, 2.72924852,
2.66635823, 2.60831141, 2.53093195, 2.47217631,
2.42190933, 2.36228228, 2.30094266, 2.24602866,
2.19216943, 2.14143515, 2.10641694, 2.07170939,
2.04412961, 2.0158174 , 2.00059986, 1.98546684,
1.97646523, 1.96455812, 1.95887971, 1.94987118])*u('mg/L'), 5).tolist()
)
def test_column_of_time(self):
        '''
        Extract the time column from a data file.
        '''
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.column_of_time(path, 50)
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([0.00000000e+00, 5.78662000e-05, 1.15725500e-04,
1.73586900e-04, 2.31470400e-04, 2.89325100e-04,
3.47199600e-04, 4.05070800e-04, 4.62941200e-04,
5.20805100e-04, 5.78682300e-04, 6.36541000e-04,
6.94405500e-04, 7.52295200e-04, 8.10152600e-04,
8.68025100e-04, 9.25879200e-04, 9.83766900e-04,
1.04163170e-03, 1.09949610e-03, 1.15736260e-03,
1.21522990e-03, 1.27310590e-03, 1.33096560e-03,
1.38884810e-03, 1.44671260e-03, 1.50456890e-03,
1.56244910e-03, 1.62031940e-03, 1.67819090e-03,
1.73605480e-03, 1.79390590e-03, 1.85178640e-03,
1.90965780e-03, 1.96752080e-03, 2.02538760e-03,
2.08325540e-03, 2.14113380e-03, 2.19899280e-03,
2.25686180e-03, 2.31473400e-03, 2.37261100e-03,
2.43048170e-03, 2.48834570e-03, 2.54620210e-03,
2.60408890e-03, 2.66194550e-03, 2.71981170e-03,
2.77768240e-03, 2.83556180e-03, 2.89342620e-03,
2.95130110e-03, 3.00916580e-03, 3.06704400e-03,
3.12490300e-03, 3.18278490e-03, 3.24064920e-03,
3.29852180e-03, 3.35638230e-03, 3.41425150e-03,
3.47212870e-03, 3.52999870e-03, 3.58786830e-03,
3.64572740e-03, 3.70359810e-03, 3.76146930e-03,
3.81933520e-03, 3.87721010e-03, 3.93506860e-03,
3.99295440e-03, 4.05082240e-03, 4.10868470e-03,
4.16654890e-03, 4.22442890e-03, 4.28230160e-03,
4.34016650e-03, 4.39804130e-03, 4.45591720e-03,
4.51377060e-03, 4.57164920e-03, 4.62952340e-03,
4.68739510e-03, 4.74524320e-03, 4.80312930e-03,
4.86098350e-03, 4.91887450e-03, 4.97673430e-03,
5.03459310e-03, 5.09248050e-03, 5.15033640e-03,
5.20820950e-03, 5.26607440e-03, 5.32394690e-03,
5.38181660e-03, 5.43967960e-03, 5.49755470e-03,
5.55543130e-03, 5.61330110e-03, 5.67117330e-03,
5.72903190e-03, 5.78690100e-03, 5.84477570e-03,
5.90264880e-03, 5.96051240e-03, 6.01837960e-03,
6.07625150e-03, 6.13413050e-03, 6.19199110e-03,
6.24987260e-03, 6.30772900e-03, 6.36560880e-03,
6.42346920e-03, 6.48135320e-03, 6.53921020e-03,
6.59709090e-03, 6.65494290e-03, 6.71281870e-03,
6.77069570e-03, 6.82855640e-03, 6.88642010e-03])*u.day, 5).tolist()
)
answer = pp.column_of_time(path, 50, end=60, units='hr')
answer = np.round(answer, 5)
self.assertSequenceEqual(
answer.tolist(),
np.round(np.array([0.00000000e+00, 5.78662000e-05, 1.15725500e-04,
1.73586900e-04, 2.31470400e-04, 2.89325100e-04,
3.47199600e-04, 4.05070800e-04, 4.62941200e-04,
5.20805100e-04])*24*u.hr, 5).tolist()
)
def test_notes(self):
        '''
        Test function that extracts meta information from data file.
        '''
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'example datalog.xls')
answer = pp.notes(path)['Day fraction since midnight on ']
x = pd.DataFrame(index=[1, 29, 35],
columns=['Day fraction since midnight on ', 'red dye (mg/L)', 'Run Pump ()', 'Pump ()'])
x.iloc[0][0] = 'Start'
x.iloc[1][0] = 'Start'
x.iloc[2][0] = '30 mg/L'
self.assertSequenceEqual(
answer.tolist(),
x['Day fraction since midnight on '].tolist())
def test_remove_notes(self):
'''
Return a DataFrame without any lines that originally contained text
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
output = pp.remove_notes(pd.read_csv(path + '/example datalog.xls', delimiter='\t'))
self.assertSequenceEqual(np.round(pd.to_numeric(output.iloc[:, 0]), 5).tolist(), np.round(np.array(
[0.6842773323, 0.6843351954, 0.6843930789, 0.6844509555, 0.6845088278,
0.6845666989, 0.6846245615, 0.6846824172, 0.6847402968, 0.6847981752,
0.6848560403, 0.6849139126, 0.6849717883, 0.6850296562, 0.6850875147,
0.6851453919, 0.6852032725, 0.6852611229, 0.6853190069, 0.6853768753,
0.6854347496, 0.6854926132, 0.6855504820, 0.6856083520, 0.6856662182,
0.6857240844, 0.6857819618, 0.6858398270, 0.6858977139, 0.6859555700,
0.6860134505, 0.6860713232, 0.6861291842, 0.6861870457, 0.6862449249,
0.6863027915, 0.6863606668, 0.6864185391, 0.6864764071, 0.6865342703,
0.6865921393, 0.6866500041, 0.6867078679, 0.6867657506, 0.6868236041,
0.6868814757, 0.6869393510, 0.6869972210, 0.6870550872, 0.6871129465,
0.6871708079, 0.6872286914, 0.6872865461, 0.6873444206, 0.6874022918,
0.6874601622, 0.6875180261, 0.6875759033, 0.6876337620, 0.6876916265,
0.6877495162, 0.6878073736, 0.6878652461, 0.6879231002, 0.6879809879,
0.6880388527, 0.6880967171, 0.6881545836, 0.6882124509, 0.6882703269,
0.6883281866, 0.6883860691, 0.6884439336, 0.6885017899, 0.6885596701,
0.6886175404, 0.6886754119, 0.6887332758, 0.6887911269, 0.6888490074,
0.6889068788, 0.6889647418, 0.6890226086, 0.6890804764, 0.6891383548,
0.6891962138, 0.6892540828, 0.6893119550, 0.6893698320, 0.6894277027,
0.6894855667, 0.6895434231, 0.6896013099, 0.6896591665, 0.6897170327,
0.6897749034, 0.6898327828, 0.6898906472, 0.6899485221, 0.6900063868,
0.6900642650, 0.6901221240, 0.6901800059, 0.6902378702, 0.6902957428,
0.6903536033, 0.6904114725, 0.6904693497, 0.6905272197, 0.6905850893,
0.6906429484, 0.6907008191, 0.6907586903, 0.6908165562, 0.6908744311,
0.6909322896, 0.6909901754, 0.6910480434, 0.6911059057, 0.6911637699,
0.6912216499, 0.6912795226, 0.6913373875, 0.6913952623, 0.6914531382,
0.6915109916, 0.6915688702, 0.6916267444, 0.6916846161, 0.6917424642,
0.6918003503, 0.6918582045, 0.6919160955, 0.6919739553, 0.6920318141,
0.6920897015, 0.6921475574, 0.6922054305, 0.6922632954, 0.6923211679,
0.6923790376, 0.6924369006, 0.6924947757, 0.6925526523, 0.6926105221,
0.6926683943, 0.6927262529, 0.6927841220, 0.6928419967, 0.6928998698,
0.6929577334, 0.6930156006, 0.6930734725, 0.6931313515, 0.6931892121,
0.6932470936, 0.6933049500, 0.6933628298, 0.6934206902, 0.6934785742,
0.6935364312, 0.6935943119, 0.6936521639, 0.6937100397, 0.6937679167,
0.6938257774, 0.6938836411]), 5).tolist())
def test_get_data_by_time(self):
'''
Extract column(s) of data between given starting and ending days and times
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
data_day1 = pd.read_csv(path + '/datalog_6-14-2018.xls', delimiter='\t')
data_day1 = np.round([pd.to_numeric(data_day1.iloc[:, 0]), pd.to_numeric(data_day1.iloc[:, 4])], 5)
data_day1 = [data_day1[0].tolist(), data_day1[1].tolist()]
data_day2 = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
data_day2 = np.round([pd.to_numeric(data_day2.iloc[:, 0]), pd.to_numeric(data_day2.iloc[:, 4])], 5)
data_day2 = [data_day2[0].tolist(), data_day2[1].tolist()]
data_day2[0][0] = 0 # to remove scientific notation "e-"
# SINGLE COLUMN, ONE DAY
output = pp.get_data_by_time(path=path, columns=0, dates="6-14-2018", start_time="12:20",
end_time="13:00", extension=".xls")
self.assertSequenceEqual(np.round(output, 5).tolist(), data_day1[0][1041:1282])
# SINGLE COLUMN, TWO DAYS
output = pp.get_data_by_time(path=path, columns=0, dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls")
time_column = data_day1[0][1041:] + np.round(np.array(data_day2[0][:3901])+1, 5).tolist()
self.assertSequenceEqual(np.round(output, 5).tolist(), time_column)
# MULTI COLUMN, ONE DAY
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018"], start_time="12:20",
end_time="13:00", extension=".xls")
self.assertSequenceEqual(np.round(output[0], 5).tolist(), data_day1[0][1041:1282])
self.assertSequenceEqual(np.round(output[1], 5).tolist(), data_day1[1][1041:1282])
# MULTI COLUMN, TWO DAYS
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls")
time_column = data_day1[0][1041:] + np.round(np.array(data_day2[0][:3901])+1, 5).tolist()
self.assertSequenceEqual(np.round(output[0], 5).tolist(), time_column)
self.assertSequenceEqual(np.round(output[1], 5).tolist(), data_day1[1][1041:]+data_day2[1][:3901])
# MULTI COLUMN, TWO DAYS, WITH UNITS
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls", units=['day', 'mg/L'])
time_column = data_day1[0][1041:] + np.round(np.array(data_day2[0][:3901])+1, 5).tolist()
self.assertEqual(output[0].units, u.day)
self.assertSequenceEqual(np.round(output[0].magnitude, 5).tolist(), time_column)
self.assertEqual(output[1].units, u.mg/u.L)
self.assertSequenceEqual(np.round(output[1].magnitude, 5).tolist(), data_day1[1][1041:]+data_day2[1][:3901])
######## WITH ELAPSED TIME ########
start = pp.day_fraction("12:20")
data_day1 = pd.read_csv(path + '/datalog_6-14-2018.xls', delimiter='\t')
data_day1 = [np.round(pd.to_numeric(data_day1.iloc[:, 0]) - start, 5).tolist(),
np.round(pd.to_numeric(data_day1.iloc[:, 4]), 5).tolist()]
data_day2 = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
data_day2.iloc[0,0] = 0 # to remove scientific notation "e-"
data_day2 = [np.round(pd.to_numeric(data_day2.iloc[:, 0]) - start + 1, 5).tolist(),
np.round(pd.to_numeric(data_day2.iloc[:, 4]), 5).tolist()]
# SINGLE COLUMN, ONE DAY
output = pp.get_data_by_time(path=path, columns=0, dates="6-14-2018", start_time="12:20",
end_time="13:00", extension=".xls", elapsed=True)
self.assertSequenceEqual(np.round(output, 5).tolist(), data_day1[0][1041:1282])
# MULTI COLUMN, TWO DAYS
output = pp.get_data_by_time(path=path, columns=[0, 4], dates=["6-14-2018", "6-15-2018"],
start_time="12:20", end_time="10:50", extension=".xls",
elapsed=True)
self.assertSequenceEqual(np.round(output[0], 5).tolist(), data_day1[0][1041:]+data_day2[0][:3901])
self.assertSequenceEqual(np.round(output[1], 5).tolist(), data_day1[1][1041:]+data_day2[1][:3901])
def test_day_fraction(self):
'''
Converts time into a fraction of the day
'''
time = pp.day_fraction(time="12:00")
self.assertEqual(time, 0.5)
def test_data_from_dates(self):
'''
Return a list of DataFrames representing the ProCoDA data files stored in the given path and recorded on the given dates.
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
dataFromPath = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
getDataFromDates = pp.data_from_dates(path=path, dates='6-15-2018', extension=".xls")[0]
self.assertTrue(getDataFromDates.equals(dataFromPath))
def test_column_start_to_end(self):
'''
Return entries in column from starting index in first DataFrame to ending index in last DataFrame
'''
#One DataFrame
path = os.path.join(os.path.dirname(__file__), '.', 'data')
data_manual1 = pd.read_csv(path + '/datalog_6-14-2018.xls', delimiter='\t')
getColData1 = pp.column_start_to_end(data=[data_manual1], column=1, start_idx=2, end_idx=7)
compareColData1 = [-4.34825945, -2.3821919, -2.57200098, -2.40549088,
-1.00214481]
self.assertSequenceEqual(getColData1, compareColData1)
#Three DataFrames
data_manual2 = pd.read_csv(path + '/datalog_6-16-2018.xls', delimiter='\t')
data_manual3 = pd.read_csv(path + '/datalog_6-15-2018.xls', delimiter='\t')
getColData2 = pp.column_start_to_end([data_manual1, data_manual2, data_manual3],
column=2, start_idx=5238, end_idx=2)
compareColData2 = [24.26625443, 24.2669487, 24.26613235, 24.26708603,
24.26683617, 24.26708603, 24.26683617]
self.assertSequenceEqual(getColData2, compareColData2)
def test_get_data_by_state(self):
'''
Extract the time column and a data column for each iteration of a state
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data')
# Local path
output = pp.get_data_by_state(path, dates="6-19-2013", state=1, column=1, extension=".xls") # , "6-20-2013"
datafile = pd.read_csv(path + "/datalog_6-19-2013.xls", delimiter='\t')
time_and_data1 = np.array([pd.to_numeric(datafile.iloc[:, 0]),
np.round(pd.to_numeric(datafile.iloc[:, 1]), 5)])
start_time = time_and_data1[0, 0]
answer = [time_and_data1[:, 98:175], time_and_data1[:, 220:485],
time_and_data1[:, 3039:3304], time_and_data1[:, 5858:6123],
time_and_data1[:, 8677:8942], time_and_data1[:, 11496:11761],
time_and_data1[:, 14315:14580]]
for i in range(len(output)):
output_i = np.round(np.array(output[i]).astype(np.double), 5)
self.assertSequenceEqual([j[0] for j in output_i], [round(j-start_time, 5) for j in answer[i][0]])
self.assertSequenceEqual([j[1] for j in output_i], [j for j in answer[i][1]])
# Acceptable URL
url_acceptable = 'https://raw.githubusercontent.com/monroews/playing/master/ProCoDA_data'
output = pp.get_data_by_state(url_acceptable, dates="11-5-2019", state=1, column=1, extension='.tsv')
answer = pp.get_data_by_state(path, dates="11-5-2019", state=1, column=1, extension='.tsv')
for i in range(len(output)):
self.assertSequenceEqual([round(o, 5) for o in output[i][:,0]], [round(a, 5) for a in answer[i][:,0]])
self.assertSequenceEqual([round(o, 5) for o in output[i][:,1]], [round(a, 5) for a in answer[i][:,1]])
# Github.com URL (blob)
url_github = 'https://github.com/monroews/playing/blob/master/ProCoDA_data'
output = pp.get_data_by_state(url_github, dates="11-5-2019", state=1, column=1, extension='.tsv')
for i in range(len(output)):
self.assertSequenceEqual([round(o, 5) for o in output[i][:,0]], [round(a, 5) for a in answer[i][:,0]])
self.assertSequenceEqual([round(o, 5) for o in output[i][:,1]], [round(a, 5) for a in answer[i][:,1]])
# Github.com URL (tree)
url_github = 'https://github.com/monroews/playing/tree/master/ProCoDA_data'
output = pp.get_data_by_state(url_github, dates="11-5-2019", state=1, column=1, extension='.tsv')
for i in range(len(output)):
self.assertSequenceEqual([round(o, 5) for o in output[i][:,0]], [round(a, 5) for a in answer[i][:,0]])
self.assertSequenceEqual([round(o, 5) for o in output[i][:,1]], [round(a, 5) for a in answer[i][:,1]])
def test_plot_columns(self):
'''
Plot the columns of data given the file located by labels
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data') + '/statelog_6-14-2018.xls'
plt.figure()
pp.plot_columns(path=path, columns=" State ID")
plt.savefig("Image1.png")
plt.figure()
plt.plot([0,1,0,1,2])
plt.savefig("Image2.png")
self.assertEqual(None, compare_images("Image2.png", "Image1.png", 0))
plt.figure()
pp.plot_columns(path=path, columns=" State ID", x_axis=" State ID")
plt.savefig("Image3.png")
plt.figure()
plt.plot([0,1,0,1,2], [0,1,0,1,2])
plt.savefig("Image4.png")
self.assertEqual(None, compare_images("Image4.png", "Image3.png", 0))
plt.figure()
pp.plot_columns(path=path, columns=[" State ID"])
plt.savefig("Image5.png")
self.assertEqual(None, compare_images("Image1.png", "Image5.png", 0))
plt.figure()
pp.plot_columns(path=path, columns=[" State ID"], x_axis=" State ID")
plt.savefig("Image6.png")
self.assertEqual(None, compare_images("Image4.png", "Image6.png", 0))
self.assertRaisesRegex(ValueError, 'columns must be a string or list of strings',
pp.plot_columns, *(path, 9))
os.remove("Image1.png")
os.remove("Image2.png")
os.remove("Image3.png")
os.remove("Image4.png")
os.remove("Image5.png")
os.remove("Image6.png")
def test_iplot_columns(self):
'''
Plot the columns of data given the file located by indices
'''
path = os.path.join(os.path.dirname(__file__), '.', 'data') + '/statelog_6-14-2018.xls'
plt.figure()
pp.iplot_columns(path=path, columns=1)
plt.savefig("Image1.png")
plt.figure()
plt.plot([0,1,0,1,2])
plt.savefig("Image2.png")
self.assertEqual(None, compare_images("Image2.png", "Image1.png", 0))
plt.figure()
pp.iplot_columns(path=path, columns=1, x_axis=1)
plt.savefig("Image3.png")
plt.figure()
plt.plot([0,1,0,1,2], [0,1,0,1,2])
plt.savefig("Image4.png")
self.assertEqual(None, compare_images("Image4.png", "Image3.png", 0))
plt.figure()
pp.iplot_columns(path=path, columns=[1])
plt.savefig("Image5.png")
self.assertEqual(None, compare_images("Image1.png", "Image5.png", 0))
plt.figure()
pp.iplot_columns(path=path, columns=[1], x_axis=1)
plt.savefig("Image6.png")
self.assertEqual(None, compare_images("Image4.png", "Image6.png", 0))
self.assertRaisesRegex(ValueError, 'columns must be an int or a list of ints',
pp.iplot_columns, *(path, ' State ID'))
os.remove("Image1.png")
os.remove("Image2.png")
os.remove("Image3.png")
os.remove("Image4.png")
os.remove("Image5.png")
os.remove("Image6.png")
def test_read_state(self):
path = os.path.join(os.path.dirname(__file__), '.', 'data', '')
output_time, output_data = pp.read_state(["6-19-2013", "6-20-2013"], 1, 28, "mL/s", path, extension=".xls")
df_day1 = pd.read_csv(path + "/datalog_6-19-2013.xls", delimiter='\t')
df_day2 = pd.read_csv(path + "/datalog_6-20-2013.xls", delimiter='\t')
time_day1 = df_day1.iloc[:,0]
data_day1 = df_day1.iloc[:,28]
time_day2 = df_day2.iloc[:,0] + 1
data_day2 = df_day2.iloc[:,28]
answer_time = pd.concat([
time_day1[98:175], time_day1[220:485], time_day1[3039:3304],
time_day1[5858:6123], time_day1[8677:8942], time_day1[11496:11761],
time_day1[14315:14580],
time_day2[1442:1707], time_day2[4261:4526], time_day2[7080:7345],
time_day2[9899:10164], time_day2[12718:12983], time_day2[36572:40549],
time_day2[41660:41694], time_day2[41696:41698]
]) - time_day1.iloc[0]
answer_data = pd.concat([
data_day1[98:175], data_day1[220:485], data_day1[3039:3304],
data_day1[5858:6123], data_day1[8677:8942], data_day1[11496:11761],
data_day1[14315:14580],
data_day2[1442:1707], data_day2[4261:4526], data_day2[7080:7345],
data_day2[9899:10164], data_day2[12718:12983], data_day2[36572:40549],
data_day2[41660:41694], data_day2[41696:41698]
])
self.assertEqual(output_time.units, u.day)
self.assertSequenceEqual(list(output_time.magnitude), list(answer_time))
self.assertEqual(output_data.units, u.mL/u.s)
self.assertSequenceEqual(list(output_data.magnitude), list(answer_data))
def test_average_state(self):
path = os.path.join(os.path.dirname(__file__), '.', 'data', '')
avgs = pp.average_state(["6-19-2013", "6-20-2013"], 1, 28, "mL/s", path,
extension=".xls")
avgs = np.round(avgs, 5)
self.assertSequenceEqual(
avgs.tolist(),
[5.5, 5.5, 5.5, 5.43125, 5.42094, 5.40908, 5.39544, 5.37976, 5.36172,
5.34098, 5.31712, 5.28969, 5.5, 5.5, 5.5]*u.mL/u.s
)
def test_perform_function_on_state(self):
path = os.path.join(os.path.dirname(__file__), '.', 'data', '')
def avg_with_units(lst):
num = np.size(lst)
acc = 0
for i in lst:
acc = i + acc
return acc / num
avgs = pp.perform_function_on_state(avg_with_units,
["6-19-2013", "6-20-2013"], 1, 28,
"mL/s", path, extension=".xls")
avgs = np.round(avgs, 5)
self.assertSequenceEqual(
avgs.tolist(),
[5.5, 5.5, 5.5, 5.43125, 5.42094, 5.40908, 5.39544, 5.37976, 5.36172,
5.34098, 5.31712, 5.28969, 5.5, 5.5, 5.5]*u.mL/u.s
)
def test_read_state_with_metafile(self):
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'Test Meta File.txt')
def avg_with_units(lst):
num = np.size(lst)
acc = 0
for i in lst:
acc = i + acc
return acc / num
ids, answer = pp.read_state_with_metafile(avg_with_units, 1, 28, path, [], ".xls", "mg/L")
self.assertSequenceEqual(["1", "2"], ids.tolist())
self.assertSequenceEqual([5.445427082723495, 5.459751965314751]*u.mg/u.L, answer)
def test_write_calculations_to_csv(self):
path = os.path.join(os.path.dirname(__file__), '.', 'data', 'Test Meta File.txt')
out_path = os.path.join(os.path.dirname(__file__), '.', 'data', 'test_output.txt')
def avg_with_units(lst):
num = np.size(lst)
acc = 0
for i in lst:
acc = i + acc
return acc / num
output = pp.write_calculations_to_csv(avg_with_units, 1, 28, path,
["Average Conc (mg/L)"], out_path,
extension=".xls")
self.assertSequenceEqual(["1", "2"], output['ID'].tolist())
self.assertSequenceEqual(
[5.445427082723495, 5.459751965314751],
output['Average Conc (mg/L)'].tolist())
def test_intersect(self):
#tests one crossing
x = np.array([1,2,3])
y1 = np.array([2,6,8])
y2 = np.array([6,2,3])
output = pp.intersect(x, y1, y2)
expected = (np.array([1.5]), np.array([4]), np.array([1]))
for i in range(len(expected)):
self.assertSequenceEqual(list(expected[i]), list(output[i]))
#tests two crossings
x = np.array([1,2,3,4,5,6])
        y1 = np.array([2,6,8,4,1])
import numpy as np
center = np.array([1, 0, 1])
x_w = np.array([1, 0, 0])
y_w = np.array([0, 1, 0])
z_w = np.array([0, 0, 1])
import json
import numpy as np
import matplotlib.pyplot as plt
import os
import yaml
from sklearn.metrics import f1_score, roc_auc_score
from fcos_core.config.paths_catalog import DatasetCatalog
from Data.Preprocess import join_path
def compute_iou(box1, box2):
"""
Compute IoU between two boxes.
box1: [b1_y1, b1_x1, b1_y2, b1_x2]
box2: [b2_y1, b2_x1, b2_y2, b2_x2]
return: float
"""
# Compute intersection
b1_y1, b1_x1, b1_h, b1_w = box1
b2_y1, b2_x1, b2_h, b2_w = box2
b1_y2, b1_x2 = b1_y1+b1_h, b1_x1+b1_w
b2_y2, b2_x2 = b2_y1+b2_h, b2_x1+b2_w
y1 = max(b1_y1, b2_y1)
x1 = max(b1_x1, b2_x1)
y2 = min(b1_y2, b2_y2)
x2 = min(b1_x2, b2_x2)
intersection = max(x2 - x1, 0)*max(y2 - y1, 0)
# Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
iou = intersection / union
return iou
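# Worked example (hypothetical boxes in [y1, x1, h, w] format):
# box_a = [0, 0, 2, 2] spans (0, 0)-(2, 2) and box_b = [1, 1, 2, 2] spans
# (1, 1)-(3, 3); intersection = 1, union = 4 + 4 - 1 = 7, so the IoU is 1/7.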
def iou_matrix(boxes1, boxes2):
n = len(boxes1)
m = len(boxes2)
matrix = np.zeros([n, m])
for i in range(n):
for j in range(m):
matrix[i, j] = compute_iou(boxes1[i], boxes2[j])
return matrix
def get_cls_results(gt_boxes, pred_boxes, iou_th):
gt = [x['category_id'] for x in gt_boxes]
gt_bbox = [x['bbox'] for x in gt_boxes]
pred = [x['category_id'] for x in pred_boxes]
pred_bbox = [x['bbox'] for x in pred_boxes]
pred_score = [x['score'] for x in pred_boxes]
matrix = iou_matrix(pred_bbox, gt_bbox) # (n_pred, n_label)
out_label = []
# out_score = []
out_pred = []
# tp
tp_index = np.nonzero(matrix>iou_th)
for i in range(tp_index[0].size):
pred_index = tp_index[0][i]
label_index = tp_index[1][i]
# best pred in duplicated preds
if matrix[pred_index, label_index] == np.max(matrix[:,label_index]).item():
out_label.append(gt[label_index])
# out_score.append(pred_score[tp_index[0][i]])
out_pred.append(pred[pred_index])
# duplicate preds, taken as fp
else:
out_label.append(0)
# out_score.append(pred_score[fp_index[i]])
out_pred.append(pred[pred_index])
# fp
fp_index = np.nonzero(np.max(matrix, axis=1)<=iou_th)
for i in range(fp_index[0].size):
out_label.append(0)
# out_score.append(pred_score[fp_index[i]])
out_pred.append(pred[fp_index[0][i]])
# fn
if len(pred)>0:
fn_index = np.nonzero(np.max(matrix, axis=0)<=iou_th)
for i in range(fn_index[0].size):
out_label.append(gt[fn_index[0][i]])
# out_score.append()
out_pred.append(0)
else:
out_label.extend(gt)
# out_score.append()
out_pred.extend([0,]*len(gt))
return out_label, out_pred
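# Convention used above (hypothetical class ids): category 0 stands for
# "no detection", so an unmatched prediction contributes the pair
# (label=0, pred=class) and a missed ground-truth box contributes (label=class, pred=0).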
def compute_auc(pred, label, negative_th):
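    # Assumption: scores are ordinal labels in 0..4, so dividing by 4 maps
    # predictions onto [0, 1] before computing the binary ROC AUC.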
pred = pred/4
label = np.where(label>negative_th, 1, 0)
auc = roc_auc_score(label, pred)
return auc
def confusion_metrix(pred, label, negative_th):
pred = np.array(pred)
label = np.array(label)
f1 = f1_score(label, pred, average='macro')
auc = compute_auc(pred, label, negative_th)
acc_allclass = np.count_nonzero(pred==label)/pred.size
acc_ap1 = np.count_nonzero(np.abs(pred-label)<2)/pred.size
pred = np.where(pred>negative_th, 1, 0)
label = np.where(label>negative_th, 1, 0)
    tp = np.count_nonzero(pred*label)
# -*- coding: utf-8 -*-
"""
Site frequency spectra.
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/sf.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# third party dependencies
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
def site_frequency_spectrum(derived_ac):
"""Calculate the site frequency spectrum, given derived allele counts for a
set of biallelic variant sites.
Parameters
----------
derived_ac : array_like, int
A 1-dimensional array of shape (n_variants,) where each array
element holds the count of derived alleles found for a single variant
across some set of samples.
Returns
-------
sfs : ndarray, int
An array of integers where the value of the kth element is the
number of variant sites with k derived alleles.
See Also
--------
site_frequency_spectrum_scaled, site_frequency_spectrum_folded,
site_frequency_spectrum_folded_scaled, plot_site_frequency_spectrum
"""
# check input
derived_ac = np.asarray(derived_ac)
assert derived_ac.ndim == 1
# calculate frequency spectrum
sfs = np.bincount(derived_ac)
return sfs
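# Sketch with made-up counts: three variants carrying 1, 1 and 2 derived
# alleles give site_frequency_spectrum(np.array([1, 1, 2])) -> array([0, 2, 1]).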
def site_frequency_spectrum_folded(biallelic_ac):
"""Calculate the folded site frequency spectrum, given reference and
alternate allele counts for a set of biallelic variants.
Parameters
----------
biallelic_ac : array_like int
A 2-dimensional array of shape (n_variants, 2), where each row
holds the reference and alternate allele counts for a single
biallelic variant across some set of samples.
Returns
-------
sfs_folded : ndarray, int
An array of integers where the value of the kth element is the
number of variant sites with k observations of the minor allele.
See Also
--------
site_frequency_spectrum, site_frequency_spectrum_scaled,
site_frequency_spectrum_folded_scaled, plot_site_frequency_spectrum
"""
# check input
biallelic_ac = np.asarray(biallelic_ac)
assert biallelic_ac.ndim == 2
assert biallelic_ac.shape[1] == 2
# calculate minor allele counts
minor_ac = np.amin(biallelic_ac, axis=1)
# calculate frequency spectrum
sfs_folded = np.bincount(minor_ac)
return sfs_folded
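# Sketch with made-up counts: biallelic_ac = [[9, 1], [4, 6]] has minor allele
# counts [1, 4], so the folded spectrum is array([0, 1, 0, 0, 1]).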
def site_frequency_spectrum_scaled(derived_ac):
"""Calculate the site frequency spectrum, scaled such that a constant value
is expected across the spectrum for neutral variation and a population at
constant size.
Parameters
----------
derived_ac : array_like, int
A 1-dimensional array of shape (n_variants,) where each array
element holds the count of derived alleles found for a single variant
across some set of samples.
Returns
-------
sfs_scaled : ndarray, int
An array of integers where the value of the kth element is the
number of variant sites with k derived alleles, multiplied by k.
Notes
-----
Under neutrality and constant population size, site frequency
is expected to be constant across the spectrum, and to approximate
the value of the population-scaled mutation rate theta.
See Also
--------
site_frequency_spectrum, site_frequency_spectrum_folded,
site_frequency_spectrum_folded_scaled, plot_site_frequency_spectrum
"""
# calculate frequency spectrum
sfs = site_frequency_spectrum(derived_ac)
# scaling
k = np.arange(sfs.size)
sfs_scaled = sfs * k
return sfs_scaled
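# Continuing the sketch above: sfs = [0, 2, 1] scaled by k = [0, 1, 2] gives
# [0, 2, 2]; under neutrality every non-zero entry estimates theta.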
def site_frequency_spectrum_folded_scaled(biallelic_ac, m=None):
"""Calculate the folded site frequency spectrum, scaled such that a
constant value is expected across the spectrum for neutral variation and
a population at constant size.
Parameters
----------
biallelic_ac : array_like int
A 2-dimensional array of shape (n_variants, 2), where each row
holds the reference and alternate allele counts for a single
biallelic variant across some set of samples.
m : int, optional
The total number of alleles observed at each variant site. Equal to
the number of samples multiplied by the ploidy. If not provided,
will be inferred to be the maximum value of the sum of reference and
alternate allele counts present in `biallelic_ac`.
Returns
-------
sfs_folded_scaled : ndarray, int
An array of integers where the value of the kth element is the
number of variant sites with k observations of the minor allele,
multiplied by the scaling factor (k * (m - k) / m).
Notes
-----
Under neutrality and constant population size, site frequency
is expected to be constant across the spectrum, and to approximate
the value of the population-scaled mutation rate theta.
This function is useful where the ancestral and derived status of alleles
is unknown.
See Also
--------
site_frequency_spectrum, site_frequency_spectrum_scaled,
site_frequency_spectrum_folded, plot_site_frequency_spectrum
"""
# calculate the folded site frequency spectrum
sfs_folded = site_frequency_spectrum_folded(biallelic_ac)
# determine the total number of alleles per variant
if m is None:
m = np.amax(np.sum(biallelic_ac, axis=1))
# scaling
    k = np.arange(sfs_folded.size)
    sfs_folded_scaled = sfs_folded * k * (m - k) / m
    return sfs_folded_scaled
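# Sketch (made-up m = 10): the k = 1 entry is scaled by 1 * 9 / 10 = 0.9 and
# the k = 4 entry by 4 * 6 / 10 = 2.4 before being compared across the spectrum.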
"""
.. module:: dst_povm_sampling.py
:synopsis: Sample projective measurements in the way that DST does
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import numpy as np
from itertools import product
def reseed_choice(a, size=None, replace=True, p=None):
"""Wrapper for the numpy choice function that reseeds before sampling to
    ensure that it doesn't make identical choices across different parallel
runs.
"""
np.random.seed()
return np.random.choice(a=a, size=size, replace=replace, p=p)
def x_state(anc_outcome, sys_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the x-measurement on the ancilla and
a particular outcome (:math:`\widetilde{\pm}1`) of the x-measurement on the
system:
.. math::
\begin{align}
\vert\psi\rangle&=\cos\frac{\theta}{2}\vert0\rangle+
\sin\frac{\theta}{2}\vert1\rangle \\
\theta&=\begin{cases}\operatorname{arctan2}\left(\pm2\cos\varphi,
\,-\sin^2\varphi\right) & \widetilde{+} \\
0 & \widetilde{-}\end{cases}
\end{align}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
x-measurement
:param sys_outcome: :math:`\widetilde{\pm}1`, indicates eigenvalue observed
on system x-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
theta = np.where(anc_outcome > 0, np.arctan2(2*sys_outcome*np.cos(phi),
-np.sin(phi)**2), 0)
return np.array([np.cos(theta/2), np.sin(theta/2)])
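# Worked check (phi = pi/2, anc_outcome = +1, sys_outcome = +1): cos(phi) = 0,
# so theta = arctan2(0, -1) = pi and the returned state is
# [cos(pi/2), sin(pi/2)] = [0, 1], i.e. |1> in the computational basis.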
def y_state(anc_outcome, sys_outcome, phi):
r"""Return the state corresponding to the projective measurement implied by
a particular outcome (:math:`\pm1`) of the y-measurement on the ancilla and
a particular outcome on the system (:math:`\widetilde{\pm}1`):
.. math::
\begin{align}
\vert\psi\rangle&=\cos\frac{\theta}{2}\vert0\rangle+
\sin\frac{\theta}{2}\vert1\rangle \\
\theta&=\operatorname{arccos}\left(\widetilde{\pm}
\frac{2\left\{\begin{array}{l r}\sin(\varphi+\pi/4) & + \\
\cos(\varphi+\pi/4) & -\end{array}\right\}^2-1}{2\left\{\begin{array}
{l r}\sin(\varphi+\pi/4) & + \\ \cos(\varphi+\pi/4) & -\end{array}
\right\}^2+1}\right)
\end{align}
:param anc_outcome: :math:`\pm1`, indicates eigenvalue observed on ancilla
z-measurement
:param sys_outcome: :math:`\widetilde{\pm}1`, indicates eigenvalue observed
on system x-measurement
:param phi: The strength of the interaction
:returns: The state represented in the standard computational (z)
basis
"""
sc = np.where(anc_outcome > 0, np.sin(phi + np.pi/4), np.cos(phi + np.pi/4))
    theta = np.arccos(sys_outcome*(2*sc**2 - 1)/(2*sc**2 + 1))
    return np.array([np.cos(theta/2), np.sin(theta/2)])
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import warnings
import numpy as np
import random
import six
import time
import itertools
import collections
from collections import defaultdict
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from paddle.fluid import unique_name
from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
def _set_use_system_allocator(value=None):
USE_SYSTEM_ALLOCATOR_FLAG = "FLAGS_use_system_allocator"
old_value = core.globals()[USE_SYSTEM_ALLOCATOR_FLAG]
value = old_value if value is None else value
core.globals()[USE_SYSTEM_ALLOCATOR_FLAG] = value
return old_value
def randomize_probability(batch_size, class_num, dtype='float32'):
prob = np.random.uniform(
0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
prob_sum = prob.sum(axis=1)
for i in six.moves.xrange(len(prob)):
prob[i] /= prob_sum[i]
return prob
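# Example (illustrative): randomize_probability(2, 3) returns a 2x3 float32
# array whose rows each sum to 1, usable as soft labels in loss-op tests.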
def get_numeric_gradient(place,
scope,
op,
inputs,
input_to_check,
output_names,
delta=0.005,
in_place=False):
# FIXME: change this method by compile time concepts
set_input(scope, op, inputs, place)
def product(dim):
return six.moves.reduce(lambda a, b: a * b, dim, 1)
tensor_to_check = scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.shape())
if not hasattr(get_numeric_gradient, 'check_shape_time'):
get_numeric_gradient.check_shape_time = 0
if tensor_size >= 100:
get_numeric_gradient.check_shape_time += 1
tensor_to_check_dtype = tensor_to_check._dtype()
if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
tensor_to_check_dtype = np.float32
elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
tensor_to_check_dtype = np.float64
elif tensor_to_check_dtype == core.VarDesc.VarType.FP16:
tensor_to_check_dtype = np.float16
        # set delta as np.float16; it will automatically convert to float32, float64
delta = np.array(delta).astype(np.float16)
else:
raise ValueError("Not supported data type " + str(
tensor_to_check_dtype))
def get_output():
sum = []
op.run(scope, place)
for output_name in output_names:
sum.append(
np.array(scope.find_var(output_name).get_tensor()).astype(
tensor_to_check_dtype).mean())
return tensor_to_check_dtype(np.array(sum).sum() / len(output_names))
gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)
def __get_elem__(tensor, i):
if tensor_to_check_dtype == np.float16:
numpy_tensor = np.array(tensor).astype(np.float16)
numpy_tensor = numpy_tensor.flatten()
return numpy_tensor[i]
elif tensor_to_check_dtype == np.float32:
return tensor._get_float_element(i)
else:
return tensor._get_double_element(i)
def __set_elem__(tensor, i, e):
if tensor_to_check_dtype == np.float16:
numpy_tensor = np.array(tensor).astype(np.float16)
shape = numpy_tensor.shape
numpy_tensor = numpy_tensor.flatten()
numpy_tensor[i] = e
numpy_tensor = numpy_tensor.reshape(shape)
tensor.set(numpy_tensor, place)
elif tensor_to_check_dtype == np.float32:
tensor._set_float_element(i, e)
else:
tensor._set_double_element(i, e)
# we only compute gradient of one element each time.
# we use a for loop to compute the gradient of every element.
for i in six.moves.xrange(tensor_size):
if in_place:
set_input(scope, op, inputs, place)
        # get one input element through its index i.
origin = __get_elem__(tensor_to_check, i)
# add delta to it, run op and then get the sum of the result tensor.
x_pos = origin + delta
__set_elem__(tensor_to_check, i, x_pos)
y_pos = get_output()
if in_place:
set_input(scope, op, inputs, place)
x_neg = origin - delta
__set_elem__(tensor_to_check, i, x_neg)
y_neg = get_output()
__set_elem__(tensor_to_check, i, origin)
gradient_flat[i] = (y_pos - y_neg) / delta / 2
return gradient_flat.reshape(tensor_to_check.shape())
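# A minimal, self-contained sketch (illustrative only, not part of the OpTest
# API) of the central-difference scheme used by get_numeric_gradient above:
# perturb one element at a time by +/-delta and estimate the derivative of the
# scalar output as (y_pos - y_neg) / (2 * delta).
def _central_difference_demo():
    x = np.array([1.0, 2.0, 3.0])
    def f(v):
        return (v ** 2).mean()
    delta = 1e-4
    grad = np.zeros_like(x)
    for i in range(x.size):
        x[i] += delta
        y_pos = f(x)
        x[i] -= 2 * delta
        y_neg = f(x)
        x[i] += delta  # restore the original element
        grad[i] = (y_pos - y_neg) / (2 * delta)
    # analytic gradient of mean(v ** 2) is 2 * v / v.size
    assert np.allclose(grad, 2 * x / x.size, atol=1e-6)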
def skip_check_grad_ci(reason=None):
"""Decorator to skip check_grad CI.
Check_grad is required for Op test cases. However, there are some special
cases that do not need to do check_grad. This decorator is used to skip the
check_grad of the above cases.
    Note: the execution of the unit test will not be skipped. It just avoids check_grad
checking in tearDownClass method by setting a `no_need_check_grad` flag.
Example:
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestInference(OpTest):
"""
if not isinstance(reason, str):
raise AssertionError("The reason for skipping check_grad is required.")
def wrapper(cls):
cls.no_need_check_grad = True
return cls
return wrapper
class OpTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''Fix random seeds to remove randomness from tests'''
cls._np_rand_state = np.random.get_state()
cls._py_rand_state = random.getstate()
cls.call_once = False
cls.dtype = None
cls.outputs = {}
np.random.seed(123)
random.seed(124)
cls._use_system_allocator = _set_use_system_allocator(True)
@classmethod
def tearDownClass(cls):
"""Restore random seeds"""
np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state)
_set_use_system_allocator(cls._use_system_allocator)
def is_empty_grad_op(op_type):
all_op_kernels = core._get_all_register_op_kernels()
grad_op = op_type + '_grad'
if grad_op in all_op_kernels.keys():
if hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True:
grad_op_kernels = all_op_kernels[grad_op]
for grad_op_kernel in grad_op_kernels:
if 'MKLDNN' in grad_op_kernel:
return False
else:
return False
return True
if not hasattr(cls, "op_type"):
raise AssertionError(
"This test do not have op_type in class attrs,"
" please set self.__class__.op_type=the_real_op_type manually.")
# case in NO_FP64_CHECK_GRAD_CASES and op in NO_FP64_CHECK_GRAD_OP_LIST should be fixed
if not hasattr(cls, "no_need_check_grad") \
and not is_empty_grad_op(cls.op_type):
if cls.dtype is None or \
(cls.dtype == np.float16 \
and cls.op_type not in op_accuracy_white_list.NO_FP16_CHECK_GRAD_OP_LIST \
and not hasattr(cls, "exist_check_grad")):
raise AssertionError("This test of %s op needs check_grad." %
cls.op_type)
if cls.dtype in [np.float32, np.float64] \
and cls.op_type not in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST \
and not hasattr(cls, 'exist_fp64_check_grad'):
raise AssertionError(
"This test of %s op needs check_grad with fp64 precision." %
cls.op_type)
if hasattr(get_numeric_gradient, 'check_shape_time') \
and get_numeric_gradient.check_shape_time == 0 \
and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST \
and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
raise AssertionError(
"At least one input's shape should be large than or equal to 100 for "
+ OpTest.op_type + " Op.")
def try_call_once(self, data_type):
if not self.call_once:
self.call_once = True
self.dtype = data_type
def infer_dtype_from_inputs_outputs(self, inputs, outputs):
def is_np_data(input):
return isinstance(input, (np.ndarray, np.generic))
def infer_dtype(numpy_dict, dtype_set):
assert isinstance(
numpy_dict,
dict), "self.inputs, self.outputs must be numpy_dict"
# the inputs are as follows:
# case 1: inputs = {'X': x}
# case 2: inputs = {'X': (x, x_lod)}
# case 3: inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
# case 4: inputs = {'X': [("x1", (x1, [x1_lod1])), ("x2", (x2, [x2_.lod2]))]}
            # TODO(juncaipeng) infer dtype from inputs may obtain a wrong type.
for _, var_value in six.iteritems(numpy_dict):
if is_np_data(var_value): # case 1
dtype_set.add(var_value.dtype)
elif isinstance(var_value, (list, tuple)): # case 2, 3, 4
for sub_val_value in var_value:
if is_np_data(sub_val_value): # case 2
dtype_set.add(sub_val_value.dtype)
elif len(sub_val_value) > 1 and is_np_data(
sub_val_value[1]): # case 3
dtype_set.add(sub_val_value[1].dtype)
elif len(sub_val_value) > 1 and isinstance(sub_val_value[1], (list, tuple)) \
and is_np_data(sub_val_value[1][0]): # case 4
dtype_set.add(sub_val_value[1][0].dtype)
# infer dtype from inputs, and dtype means the precision of the test
# collect dtype of all inputs
dtype_set = set()
infer_dtype(inputs, dtype_set)
dtype_list = [
np.dtype(np.float64), np.dtype(np.float32), np.dtype(np.float16),
np.dtype(np.int64), np.dtype(np.int32), np.dtype(np.int16),
np.dtype(np.int8), np.dtype(np.uint8), np.dtype(np.bool)
]
# check the dtype in dtype_list in order, select the first dtype that in dtype_set
for dtype in dtype_list:
if dtype in dtype_set:
self.dtype = dtype
break
# save dtype in class attr
self.__class__.dtype = self.dtype
def feed_var(self, input_vars, place):
feed_map = {}
for var_name in input_vars:
if isinstance(input_vars[var_name], list):
for name, np_value in self.inputs[var_name]:
tensor = core.LoDTensor()
if isinstance(np_value, tuple):
tensor.set(np_value[0], place)
tensor.set_recursive_sequence_lengths(np_value[1])
else:
tensor.set(np_value, place)
feed_map[name] = tensor
else:
tensor = core.LoDTensor()
if isinstance(self.inputs[var_name], tuple):
tensor.set(self.inputs[var_name][0], place)
tensor.set_recursive_sequence_lengths(self.inputs[var_name][
1])
else:
tensor.set(self.inputs[var_name], place)
feed_map[var_name] = tensor
return feed_map
def _append_ops(self, block):
        self.__class__.op_type = self.op_type  # for CI check, please do not delete it for now
if hasattr(self, "use_mkldnn"):
self.__class__.use_mkldnn = self.use_mkldnn
op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
"infer datatype from inputs and outputs for this test case"
self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
inputs = append_input_output(block, op_proto, self.inputs, True,
self.dtype)
outputs = append_input_output(block, op_proto, self.outputs, False,
self.dtype)
if hasattr(self, "cache_name_list"):
for name in self.cache_name_list:
inputs[name] = block.create_var(
name=name,
persistable=True,
type=core.VarDesc.VarType.RAW,
stop_gradient=True)
op = block.append_op(
type=self.op_type,
inputs=inputs,
outputs=outputs,
attrs=self.attrs if hasattr(self, "attrs") else dict())
# infer variable type and infer shape in compile-time
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
return op
def _get_io_vars(self, block, numpy_inputs):
inputs = {}
for name, value in six.iteritems(numpy_inputs):
if isinstance(value, list):
var_list = [
block.var(sub_name) for sub_name, sub_value in value
]
inputs[name] = var_list
else:
inputs[name] = block.var(name)
return inputs
def _get_inputs(self, block):
return self._get_io_vars(block, self.inputs)
def _get_outputs(self, block):
return self._get_io_vars(block, self.outputs)
def calc_output(self, place):
outs, _ = self._calc_output(place)
return outs
def _create_var_from_numpy(self, value):
if isinstance(value, tuple):
data = value[0]
lod = value[1]
v = fluid.dygraph.base.to_variable(value=data)
v.value().get_tensor().set_recursive_sequence_lengths(lod)
return v
else:
return fluid.dygraph.base.to_variable(value)
def append_input_output_for_dygraph(self, op_proto, np_list, is_input,
if_return_inputs_grad_dict, block):
def create_var(np_value, name, is_input, if_return_inputs_grad_dict):
np_value_temp = np_value
has_lod = False
lod_temp = None
if isinstance(np_value, tuple):
np_value_temp = np_value[0]
has_lod = True
lod_temp = np_value[1]
if is_input:
v = self._create_var_from_numpy(np_value_temp)
if if_return_inputs_grad_dict:
v.stop_gradient = False
if has_lod:
v.value().get_tensor().set_recursive_sequence_lengths(
lod_temp)
else:
v = block.create_var(
name=name,
dtype=np_value_temp.dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=False)
return v
# prepare variable for input or output
var_dict = defaultdict(list)
if if_return_inputs_grad_dict:
inputs_grad_dict = defaultdict()
proto_list = op_proto.inputs if is_input else op_proto.outputs
for var_proto in proto_list:
name = var_proto.name
if (name not in np_list) and var_proto.dispensable:
continue
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
v = block.create_var(
dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR)
var_dict[name].append(v)
if if_return_inputs_grad_dict:
inputs_grad_dict[name] = v
continue
if var_proto.duplicable:
assert isinstance(
np_list[name],
list), "Duplicable {} should be set as list".format(name)
var_list = []
slot_name = name
for (name, np_value) in np_list[name]:
v = create_var(np_value, name, is_input,
if_return_inputs_grad_dict)
var_list.append(v)
if if_return_inputs_grad_dict:
inputs_grad_dict[name] = v
var_dict[slot_name] = var_list
else:
nplist_value_temp = None
name_temp = None
if isinstance(np_list[name], list):
nplist_value_temp = np_list[name][0]
name_temp = name
else:
nplist_value_temp = np_list[name]
name_temp = unique_name.generate("%s_out" % (name))
v = create_var(nplist_value_temp, name_temp, is_input,
if_return_inputs_grad_dict)
var_dict[name].append(v)
if if_return_inputs_grad_dict:
inputs_grad_dict[name] = v
if if_return_inputs_grad_dict:
return var_dict, inputs_grad_dict
else:
return var_dict
def _calc_dygraph_output(self, place, parallel=False, no_check_set=None):
        self.__class__.op_type = self.op_type  # for CI check, please do not delete it for now
with fluid.dygraph.base.guard(place=place):
block = fluid.default_main_program().global_block()
op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
# prepare input variable
inputs = self.append_input_output_for_dygraph(op_proto, self.inputs,
True, False, block)
# prepare output variable
outputs = self.append_input_output_for_dygraph(
op_proto, self.outputs, False, False, block)
            # prepare attributes
attrs_outputs = {}
if hasattr(self, "attrs"):
for attrs_name in self.attrs:
if self.attrs[attrs_name] is not None:
attrs_outputs[attrs_name] = self.attrs[attrs_name]
block.append_op(
type=self.op_type,
inputs=inputs,
outputs=outputs,
attrs=attrs_outputs if hasattr(self, "attrs") else None)
return outputs
def _calc_output(self,
place,
parallel=False,
no_check_set=None,
loss=None,
enable_inplace=None,
for_inplace_test=None):
program = Program()
block = program.global_block()
op = self._append_ops(block)
inputs = self._get_inputs(block)
outputs = self._get_outputs(block)
feed_map = self.feed_var(inputs, place)
if for_inplace_test:
# Some variables' tensors hold no buffer (tensor's _holder is NULL), like XShape in reshape2 op,
# and the shapes of those variables contain 0 (eg. Xshape.shape = [0, 2, 5]).
            # Set persistable for those variables in order to get them from global_scope for the inplace grad test directly, rather than feeding them,
# since feed op calls check_memory_size() which fails when tensor's holder_ is NULL.
for out_name in op.output_arg_names:
var = block.var(out_name)
if 0 in var.shape:
var.persistable = True
original_program = program
if parallel:
use_cuda = False
if isinstance(place, fluid.CUDAPlace):
use_cuda = True
compiled_prog = fluid.CompiledProgram(program).with_data_parallel(
loss_name=loss.name if loss else None, places=place)
program = compiled_prog
fetch_list = getattr(self, "fetch_list", [])
# if the fetch_list is customized by user, we use it directly.
# if not, fill the fetch_list by the user configured outputs in test.
if len(fetch_list) == 0:
for var_name, var in six.iteritems(outputs):
if no_check_set is not None and var_name in no_check_set:
continue
if isinstance(var, list):
for v in var:
fetch_list.append(v.name)
else:
fetch_list.append(var.name)
# if the fetch_list still empty, fill the fetch_list by the operator output.
if len(fetch_list) == 0:
for out_name, out_dup in Operator.get_op_outputs(self.op_type):
fetch_list.append(str(out_name))
if enable_inplace is not None:
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = enable_inplace
compiled_prog = fluid.CompiledProgram(program).with_data_parallel(
build_strategy=build_strategy, places=place)
program = compiled_prog
executor = Executor(place)
outs = executor.run(program,
feed=feed_map,
fetch_list=fetch_list,
return_numpy=False)
self.op = op
self.program = original_program
if for_inplace_test:
return outs, fetch_list, feed_map, original_program, op.desc
else:
return outs, fetch_list
def _compare_expect_and_actual_outputs(self,
place,
fetch_list,
expect_outs,
actual_outs,
inplace_atol=None):
"""Compare expect outs and actual outs of an tested op.
Args:
place (CPUPlace | CUDAPlace): The place where the op runs.
fetch_list (list): The outputs of tested op.
expect_outs (list): The expect outs of tested op.
actual_outs (list): The actual outs of tested op.
inplace_atol (float): The tolerable error, only set when tested op doesn't ensure computational consistency, like group_norm op.
Returns:
None.
"""
# compare expect_outs and actual_outs
for i, name in enumerate(fetch_list):
# Note(zhiqiu): inplace_atol should be only set when op doesn't ensure
# computational consistency.
# When inplace_atol is not None, the inplace check uses numpy.allclose
# to check inplace result instead of numpy.array_equal.
if inplace_atol is not None:
self.assertTrue(
np.allclose(
np.array(expect_outs[i]),
np.array(actual_outs[i]),
atol=inplace_atol),
"Output (" + name + ") has diff at " + str(place) +
" when using and not using inplace" + "\nExpect " +
str(expect_outs[i]) + "\n" + "But Got" + str(actual_outs[i])
+ " in class " + self.__class__.__name__)
else:
self.assertTrue(
np.array_equal(
np.array(expect_outs[i]), np.array(actual_outs[i])),
"Output (" + name + ") has diff at " + str(place) +
" when using and not using inplace" + "\nExpect " +
str(expect_outs[i]) + "\n" + "But Got" + str(actual_outs[i])
+ " in class " + self.__class__.__name__ + '\n')
def _construct_grad_program_from_forward(self, fwd_program, grad_op_desc,
op_grad_to_var):
"""Generate grad_program which contains the grad_op.
Args:
fwd_program (tuple): The program that contains grad_op_desc's corresponding forward op.
grad_op_desc (OpDesc): The OpDesc of grad op.
op_grad_to_var (dict): The relation of variables in grad op and its forward op.
Returns:
grad_program (program): The program which contains the grad_op.
"""
grad_program = Program()
grad_block = grad_program.global_block()
new_op_desc = grad_block.desc.append_op()
new_op_desc.copy_from(grad_op_desc)
grad_program._sync_with_cpp()
# Create grad vars based on fwd vars (shape and dtype)
for arg in grad_op_desc.input_arg_names(
) + grad_op_desc.output_arg_names():
fwd_var_name = op_grad_to_var.get(arg, None)
if fwd_var_name is None:
fwd_var_name = arg
fwd_var = fwd_program.global_block().vars.get(fwd_var_name)
assert fwd_var is not None, "{} cannot be found".format(
fwd_var_name)
grad_var = grad_block.create_var(
name=arg,
dtype=fwd_var.dtype,
shape=fwd_var.shape,
type=fwd_var.type,
persistable=False)
# Some variables' tensors hold no buffer (tensor's _holder is NULL), like XShape in reshape2 op,
# and the shapes of those variables contain 0 (eg. Xshape.shape = [0, 2, 5]).
            # Set persistable for those variables in order to get them from global_scope for the inplace grad test directly, rather than feeding them,
# since feed op calls check_memory_size() which fails when tensor's holder_ is NULL.
if 0 in grad_var.shape:
grad_var.persistable = True
grad_program._sync_with_cpp()
return grad_program
def _construct_grad_feed_map_from_forward(self, place, fwd_res,
grad_op_desc, op_grad_to_var):
"""Generate grad_feed_map for grad_program.
        Since we don't really check gradient accuracy, but rather the consistency between using and not using inplace,
we use fwd outs (also inputs sometimes) to construct grad inputs.
Args:
place (CPUPlace | CUDAPlace): The place where the op runs.
fwd_res (tuple): The outputs of its forward op, in the same form as returns of _calc_outputs() when for_inplace_test is True.
i.e., tuple(fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc)
grad_op_desc (OpDesc): The OpDesc of grad op.
op_grad_to_var (dict): The relation of variables in grad op and its fwd_op.
Returns:
grad_feed_map (dict): The feed_map of grad_op.
"""
fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc = fwd_res
p = core.Place()
p.set_place(place)
grad_feed_map = {}
for arg in grad_op_desc.input_arg_names():
if arg in fwd_feed_map.keys():
grad_feed_map[arg] = fwd_feed_map[arg]._copy(p)
else:
fwd_var_name = op_grad_to_var.get(arg, None)
if fwd_var_name is None:
fwd_var_name = arg
for i, out_name in enumerate(fwd_fetch_list):
if out_name == fwd_var_name:
# don't feed variables whose tensors hold no buffer (shape contains 0 like shape = [0,2,5] and holder_ is NULL), like XShape in reshape2 op.
# get them from global_scope directly since we have set them persistable in fwd execution
if 0 in fwd_program.global_block().var(out_name).shape:
continue
else:
grad_feed_map[arg] = fwd_outs[i]._copy(p)
return grad_feed_map
def _get_need_run_ops(self, op_desc, fwd_op_desc=None):
"""Postorder traversal of the 'grad' tree to get all ops that need to run during inplace test.
        An op needs to run during the inplace check if,
        (1) it has infer_inplace,
        (2) it has infer_inplace in its grad descendants (since we need its outputs to construct its grad's inputs).
Args:
op_desc (OpDesc): The op_desc of current op.
fwd_op_desc (OpDesc): The op_desc of current op's forward op, None if current op has no forward op.
Eg. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
Returns:
need_run_ops (list[(op_desc, fwd_op_desc)]): The ops that need to run during inplace test.
"""
need_run_ops = []
visited_ops = []
def _dfs_grad_op(op_desc, fwd_op_desc=None):
visited_ops.append(op_desc.type())
has_infer_inplace = fluid.core.has_infer_inplace(op_desc.type())
has_grad_op_maker = fluid.core.has_grad_op_maker(op_desc.type())
has_infer_inplace_in_grad_descendants = False
if not has_grad_op_maker:
                has_infer_inplace_in_grad_descendants = False
else:
# get grad_op_desc
grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
op_desc, set(), [])
if not grad_op_desc_list:
has_infer_inplace_in_grad_descendants = False
else:
for i, grad_op_desc in enumerate(grad_op_desc_list):
if grad_op_desc.type(
) not in visited_ops and _dfs_grad_op(
grad_op_desc, fwd_op_desc=op_desc):
has_infer_inplace_in_grad_descendants = True
if has_infer_inplace or has_infer_inplace_in_grad_descendants:
need_run_ops.append((op_desc, fwd_op_desc))
return True
else:
return False
_dfs_grad_op(op_desc, fwd_op_desc=fwd_op_desc)
return need_run_ops
def _check_forward_inplace(self,
place,
no_check_set=None,
inplace_atol=None):
"""Chech the inplace correctness of given op (self.op_type).
Run the op twice with same inputs, one enable inplace and another disable, compare their outputs.
Args:
place (CPUPlace | CUDAPlace): The place where the op runs.
no_check_set (list): The names of outputs that needn't check, like XShape of reshape op.
inplace_atol (float): The tolerable error, only set when op doesn't ensure computational consistency, like group_norm op.
Returns:
expect_res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given op.
We return this to construct grad_program and grad_feed_map for grad inplace check.
"""
# _calc_output() returns in the form tuple(outs, fetch_list, feed_map, program, op_desc) when for_inplace_test=True.
expect_res = self._calc_output(
place,
no_check_set=no_check_set,
enable_inplace=False,
for_inplace_test=True)
actual_res = self._calc_output(
place,
no_check_set=no_check_set,
enable_inplace=True,
for_inplace_test=True)
# compare expect_outs and actual_outs
self._compare_expect_and_actual_outputs(
place,
expect_res[1],
expect_res[0],
actual_res[0],
inplace_atol=inplace_atol)
return expect_res
def _calc_grad_output(self,
place,
fwd_res,
grad_op_desc,
enable_inplace=None):
"""Calculate grad_output for given grad_op_desc.
        Since we don't really check gradient accuracy, but rather the consistency between using and not using inplace,
we use fwd outs (also inputs sometimes) to construct grad inputs.
Args:
place (CPUPlace | CUDAPlace): The place where the op runs.
fwd_res (tuple): The outputs of its forward op, in the same form as returns of _calc_outputs() when for_inplace_test is True.
i.e., tuple(fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc).
grad_op_desc (OpDesc): The OpDesc of grad op.
enable_inplace (bool): Enable inplace or not.
Returns:
res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given grad_op_desc.
"""
fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc = fwd_res
grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(fwd_op_desc,
set(), [])
grad_program = self._construct_grad_program_from_forward(
fwd_program, grad_op_desc, op_grad_to_var)
grad_feed_map = self._construct_grad_feed_map_from_forward(
place, fwd_res, grad_op_desc, op_grad_to_var)
grad_fetch_list = grad_op_desc.output_arg_names()
exe = Executor(place)
program = grad_program
if enable_inplace is not None:
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = enable_inplace
compiled_program = fluid.CompiledProgram(
grad_program).with_data_parallel(
loss_name="", build_strategy=build_strategy, places=place)
program = compiled_program
outs = exe.run(program,
feed=grad_feed_map,
fetch_list=grad_fetch_list,
return_numpy=False)
return outs, grad_fetch_list, grad_feed_map, grad_program, grad_op_desc
def _check_grad_inplace(self,
place,
fwd_res,
grad_op_desc,
inplace_atol=None):
"""Chech the inplace correctness of given grad_op_desc.
Run the grad op twice with same inputs, one enable inplace and another disable, compare their outputs.
It works like _check_forward_inplace, but the way to construct program and feed_map differs.
So we define a new function for grad, grad_grad, etc.
Args:
place (CPUPlace | CUDAPlace): The place where the op runs.
fwd_res (tuple): The outputs of its forward op, in the same form as returns of _calc_outputs() when for_inplace_test is True.
i.e., tuple(fwd_outs, fwd_fetch_list, fwd_feed_map, fwd_program, fwd_op_desc).
grad_op_desc (OpDesc): The OpDesc of grad op.
inplace_atol (float): The tolerable error, only set when op doesn't ensure computational consistency, like group_norm op.
Returns:
expect_res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given op.
We return this to construct grad_program and grad_feed_map for grad inplace check.
"""
expect_res = self._calc_grad_output(
place, fwd_res, grad_op_desc, enable_inplace=False)
actual_res = self._calc_grad_output(
place, fwd_res, grad_op_desc, enable_inplace=True)
self._compare_expect_and_actual_outputs(
place,
expect_res[1],
expect_res[0],
actual_res[0],
inplace_atol=inplace_atol)
return expect_res
def check_inplace_output_with_place(self,
place,
no_check_set=None,
inplace_atol=None):
"""Chech the inplace correctness of given op, its grad op, its grad_grad op, etc.
(1) Get all ops need to run. (see conditions in _get_need_run_ops())
(2) Run op in need_run_ops, and do inplace check if it has infer_inplace.
Args:
place (CPUPlace | CUDAPlace): The place where the op runs.
no_check_set (list): The names of outputs that needn't check, like XShape of reshape op.
inplace_atol (float): The tolerable error, only set when op doesn't ensure computational consistency, like group_norm op.
Returns:
None
"""
has_infer_inplace = fluid.core.has_infer_inplace(self.op_type)
has_grad_op_maker = fluid.core.has_grad_op_maker(self.op_type)
fwd_res = self._calc_output(
place, no_check_set=no_check_set, for_inplace_test=True)
op_desc = fwd_res[4]
need_run_ops = self._get_need_run_ops(op_desc)
res = {}
for op_desc, father_op_desc in reversed(need_run_ops):
# The first one is the forward op
has_infer_inplace = fluid.core.has_infer_inplace(op_desc.type())
if op_desc.type() == self.op_type:
if has_infer_inplace:
res[op_desc] = self._check_forward_inplace(
place,
no_check_set=no_check_set,
inplace_atol=inplace_atol)
else:
res[op_desc] = self._calc_output(
place, no_check_set=no_check_set, for_inplace_test=True)
else:
# TODO(zhiqiu): enhance inplace_grad test for ops (sum and activation) using mkldnn/ngraph
# skip op that use_mkldnn and use_ngraph currently
flags_use_mkldnn = fluid.core.globals()["FLAGS_use_mkldnn"]
attrs_use_mkldnn = hasattr(
self,
'attrs') and bool(self.attrs.get('use_mkldnn', False))
if flags_use_mkldnn or attrs_use_mkldnn:
warnings.warn(
"check inplace_grad for ops using mkldnn is not supported"
)
continue
use_ngraph = fluid.core.is_compiled_with_ngraph(
) and fluid.core.globals()["FLAGS_use_ngraph"]
if use_ngraph:
warnings.warn(
"check inplace_grad for ops using ngraph is not supported"
)
continue
if has_infer_inplace:
fwd_res = res[father_op_desc]
res[op_desc] = self._check_grad_inplace(
place, fwd_res, op_desc, inplace_atol=inplace_atol)
else:
res[op_desc] = self._calc_grad_output(place, fwd_res,
op_desc)
def check_output_with_place(self,
place,
atol,
no_check_set=None,
equal_nan=False,
check_dygraph=True,
inplace_atol=None):
if no_check_set is not None:
if self.op_type not in no_check_set_white_list.no_check_set_white_list:
raise AssertionError(
"no_check_set of op %s must be set to None." % self.op_type)
if check_dygraph:
dygraph_outs = self._calc_dygraph_output(
place, no_check_set=no_check_set)
outs, fetch_list = self._calc_output(place, no_check_set=no_check_set)
for out_name, out_dup in Operator.get_op_outputs(self.op_type):
if out_name not in self.outputs:
continue
if no_check_set is not None and out_name in no_check_set:
continue
def find_imperative_actual(target_name, dygraph_outs, place):
with fluid.dygraph.base.guard(place=place):
for name in dygraph_outs:
if name == target_name:
return dygraph_outs[name][0]
var_list = dygraph_outs[name]
for i, var in enumerate(var_list):
if var.name == target_name:
return dygraph_outs[name][i]
self.assertTrue(False, "Found failed {} {}".format(
dygraph_outs.keys(), target_name))
def find_actual(target_name, fetch_list):
found = [
i for i, var_name in enumerate(fetch_list)
if var_name == target_name
]
self.assertTrue(
len(found) == 1, "Found {} {}".format(
len(found), target_name))
return found[0]
if out_dup:
sub_out = self.outputs[out_name]
if not isinstance(sub_out, list):
raise AssertionError("sub_out type %s is not list",
type(sub_out))
for item in sub_out:
sub_out_name, expect = item[0], item[1]
if check_dygraph:
imperative_actual = find_imperative_actual(
sub_out_name, dygraph_outs, place)
imperative_actual_t = np.array(imperative_actual.value()
.get_tensor())
idx = find_actual(sub_out_name, fetch_list)
actual = outs[idx]
actual_t = np.array(actual)
expect_t = expect[0] \
if isinstance(expect, tuple) else expect
self.assertTrue(
np.allclose(
actual_t, expect_t, atol=atol, equal_nan=equal_nan),
"Output (" + sub_out_name + ") has diff at " +
str(place))
if check_dygraph:
self.assertTrue(
np.allclose(
imperative_actual_t,
expect_t,
atol=atol,
equal_nan=equal_nan),
"Output (" + sub_out_name + ") has diff at " +
str(place) + " in dygraph mode")
if isinstance(expect, tuple):
self.assertListEqual(
actual.recursive_sequence_lengths(), expect[1],
"Output (" + sub_out_name +
") has different lod at " + str(place))
if check_dygraph:
self.assertListEqual(
imperative_actual.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name +
") has different lod at " + str(place) +
" in dygraph mode")
else:
if check_dygraph:
imperative_actual = find_imperative_actual(
out_name, dygraph_outs, place)
imperative_actual_t = np.array(imperative_actual.value()
.get_tensor())
idx = find_actual(out_name, fetch_list)
actual = outs[idx]
actual_t = np.array(actual)
expect = self.outputs[out_name]
expect_t = expect[0] if isinstance(expect, tuple) else expect
self.assertTrue(
np.allclose(
actual_t, expect_t, atol=atol, equal_nan=equal_nan),
"Output (" + out_name + ") has diff at " + str(place) +
"\nExpect " + str(expect_t) + "\n" + "But Got" +
str(actual_t) + " in class " + self.__class__.__name__)
if check_dygraph:
if six.moves.reduce(
lambda x, y: x * y, imperative_actual_t.shape,
1) == 0 and six.moves.reduce(
lambda x, y: x * y, expect_t.shape, 1) == 0:
pass
else:
self.assertTrue(
np.allclose(
imperative_actual_t,
expect_t,
atol=atol,
equal_nan=equal_nan),
"Output (" + out_name + ") has diff at " +
str(place) + "\nExpect " + str(expect_t) + "\n" +
"But Got" + str(imperative_actual_t) + " in class "
+ self.__class__.__name__)
if isinstance(expect, tuple):
self.assertListEqual(actual.recursive_sequence_lengths(),
expect[1], "Output (" + out_name +
") has different lod at " + str(place))
if check_dygraph:
self.assertListEqual(
imperative_actual.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name + ") has different lod at " +
str(place) + " in dygraph mode")
# Note(zhiqiu): inplace_atol should be only set when op doesn't ensure
# computational consistency.
# For example, group_norm uses AtomicAdd on CUDAPlace, which do not ensure
# computation order when multiple threads write the same address. So the
# result of group_norm is non-deterministic when datatype is float.
# When inplace_atol is not None, the inplace check uses numpy.allclose
# to check inplace result instead of numpy.array_equal.
if inplace_atol is not None:
warnings.warn(
"inplace_atol should only be set when op doesn't ensure computational consistency, please check it!"
)
# Check inplace for given op, its grad op, its grad_grad op, etc.
# No effect on original OpTest
self.check_inplace_output_with_place(
place, no_check_set=no_check_set, inplace_atol=inplace_atol)
if check_dygraph:
return outs, dygraph_outs, fetch_list
else:
return outs, fetch_list
def check_compile_vs_runtime(self, fetch_list, fetch_outs):
def find_fetch_index(target_name, fetch_list):
found = [
i for i, var_name in enumerate(fetch_list)
if var_name == target_name
]
if len(found) == 0:
return -1
else:
self.assertTrue(
len(found) == 1,
"Found {} {}".format(len(found), target_name))
return found[0]
for name in self.op.desc.output_names():
var_names = self.op.desc.output(name)
for var_name in var_names:
i = find_fetch_index(var_name, fetch_list)
if i == -1:
                    # The output is dispensable or intermediate.
break
out = fetch_outs[i]
if isinstance(out, core.LoDTensor):
lod_level_runtime = len(out.lod())
else:
if isinstance(out, core.LoDTensorArray):
warnings.warn(
"The check of LoDTensorArray's lod_level is not implemented now!"
)
lod_level_runtime = 0
var = self.program.global_block().var(var_name)
if var.type == core.VarDesc.VarType.LOD_TENSOR:
lod_level_compile = var.lod_level
else:
lod_level_compile = 0
self.assertEqual(
lod_level_compile, lod_level_runtime,
"The lod_level of Output (" + name +
") is different between compile-time and runtime (" +
str(lod_level_compile) + " vs " + str(lod_level_runtime) +
")")
def _get_places(self):
if self.dtype == np.float16:
if core.is_compiled_with_cuda() and core.op_support_gpu(
self.op_type):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
return [place]
else:
return []
else:
return []
places = [fluid.CPUPlace()]
cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False
use_ngraph = fluid.core.is_compiled_with_ngraph(
) and fluid.core.globals()['FLAGS_use_ngraph']
if use_ngraph:
cpu_only = True
if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\
and not cpu_only:
places.append(core.CUDAPlace(0))
return places
def check_output(self,
atol=1e-5,
no_check_set=None,
equal_nan=False,
check_dygraph=True,
inplace_atol=None,
check_compile_vs_runtime=True):
self.__class__.op_type = self.op_type
if hasattr(self, "use_mkldnn"):
self.__class__.use_mkldnn = self.use_mkldnn
places = self._get_places()
for place in places:
res = self.check_output_with_place(place, atol, no_check_set,
equal_nan, check_dygraph)
if check_dygraph:
outs, dygraph_outs, fetch_list = res
else:
outs, fetch_list = res
if check_compile_vs_runtime and (
self.op_type not in
compile_vs_runtime_white_list.COMPILE_RUN_OP_WHITE_LIST):
self.check_compile_vs_runtime(fetch_list, outs)
def check_output_customized(self, checker):
places = self._get_places()
for place in places:
outs = self.calc_output(place)
outs = [np.array(out) for out in outs]
outs.sort(key=len)
checker(outs)
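    # Hedged usage sketch (illustrative, not part of the framework): a
    # customized checker receives the sorted numpy outputs and applies
    # arbitrary assertions, e.g.
    #     def checker(outs):
    #         assert outs[0].shape[0] > 0
    #     self.check_output_customized(checker)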
def _assert_is_close(self, numeric_grads, analytic_grads, names,
max_relative_error, msg_prefix):
for a, b, name in six.moves.zip(numeric_grads, analytic_grads, names):
abs_a = np.abs(a)
abs_a[abs_a < 1e-3] = 1
            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)
            self.assertLessEqual(
                max_diff, max_relative_error,
                "%s Variable %s max gradient diff %f over limit %f" %
                (msg_prefix, name, max_diff, max_relative_error))
# -*- coding: utf-8 -*-
"""
This is the element routine file, wherein the Jacobian, strain-displacement
matrix, elemental stiffness, and elemental F_int are computed.
The material routine is called in the F_e_int() function, which returns the
stress, overstress, and material tangent stiffness.
@author: Danush
"""
import numpy as np
from material_routine import *
##############################################################################
# N- shape functions for the given problem
si = 0
N = np.array([(1/2)*(1-si),(1/2)*(1+si)]).reshape(2,1)
#computation of jacobian
def jacobian_of_elements(r_elements):
"""This function returns the Jacobian for the problem.
    Input---> r_elements, depending on the mesh generator
Output--> Jacobian"""
J = (r_elements[0,1] - r_elements[0,0])/2
return J
# B strain displacement matrix
def strain_disp_mat(r_elements):
"""This function returns the strain displacement matrix.
Input----> r_elements,N-shape functions
Output---> B-strain displacement matrix(2x2)"""
jacobian = jacobian_of_elements(r_elements)
B = np.array([-1/(2*jacobian),1/(2*jacobian),
N[0]/(N[0]*r_elements[0,0] + N[1]*r_elements[0,1]),
N[1]/(N[0]*r_elements[0,0] + N[1]*r_elements[0,1])]).reshape(2,2)
return B
# The no. of Gauss points for the problem is 1, so the weight w is assigned the value 2
# gauss points = 1
w = 2
# Elemental stiffness matrix
def k_e_mat(B,r,C_t,jacobian,w):
"""This function returns the elemental stiffness matrix
which is called inside F_e_int function.
Input----> B-strain disp.Mat,r-radius from mesh generator,
C_t-Tangent stiffness Mat.,jacobian,w
Output---> K_elemental"""
k = np.array([w *(B.transpose().dot(C_t.dot(B)))*(N.transpose().dot(r.transpose()))
* jacobian]).reshape(2,2)
return k
#Elemental Assignment matrix
def a_e_mat(n_e):
"""This function returns the elemental assignment matrix
for the required no. of elements.
Input-----> n_e no. of elements
Output----> Elemental Assignment matrix"""
B = np.zeros((2,n_e+1))
    h = []
for i in range(0,n_e):
B[0][i] = 1
B[1][i+1] = 1
h.append(B)
        B = np.zeros((2,n_e+1))
    return h
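# Hedged usage sketch (toy size, not from the original file): for a two-element
# mesh, each elemental assignment matrix picks the element's two nodal DOFs out
# of the three global DOFs.
if __name__ == '__main__':
    A = a_e_mat(2)
    print(A[0])  # [[1. 0. 0.], [0. 1. 0.]] -> element 1 uses DOFs 0 and 1
    print(A[1])  # [[0. 1. 0.], [0. 0. 1.]] -> element 2 uses DOFs 1 and 2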
import numpy as np
import scipy
import scipy.optimize
def cov_se(x, xp, lengthscales, signal):
return signal**2 * np.exp(-0.5 * np.linalg.norm((x - xp)/lengthscales)**2)
def cov_main(str_cov, X, Xs, hyps, jitter=1e-5):
num_X = X.shape[0]
num_d_X = X.shape[1]
num_Xs = Xs.shape[0]
num_d_Xs = Xs.shape[1]
if num_d_X != num_d_Xs:
print('ERROR: matrix dimensions: ', num_d_X, num_d_Xs)
raise ValueError('matrix dimensions are different.')
cov_ = np.zeros((num_X, num_Xs))
if num_X == num_Xs:
cov_ += np.eye(num_X) * jitter
if str_cov == 'se':
if hyps.get('lengthscales') is None or hyps.get('signal') is None:
raise ValueError('hyperparameters are insufficient.')
for ind_X in range(0, num_X):
for ind_Xs in range(0, num_Xs):
cov_[ind_X, ind_Xs] += cov_se(X[ind_X], Xs[ind_Xs], hyps['lengthscales'], hyps['signal'])
else:
raise ValueError('kernel is inappropriate.')
return cov_
def get_hyps(str_cov, num_dim):
hyps = dict()
hyps['noise'] = 0.1
if str_cov == 'se':
hyps['signal'] = 1.0
hyps['lengthscales'] = np.zeros(num_dim) + 1.0
else:
raise ValueError('kernel is inappropriate.')
return hyps
def convert_hyps(str_cov, hyps):
list_hyps = []
list_hyps.append(hyps['noise'])
if str_cov == 'se':
list_hyps.append(hyps['signal'])
for elem_lengthscale in hyps['lengthscales']:
list_hyps.append(elem_lengthscale)
else:
raise ValueError('kernel is inappropriate.')
return np.array(list_hyps)
def restore_hyps(str_cov, hyps):
hyps = hyps.flatten()
dict_hyps = dict()
dict_hyps['noise'] = hyps[0]
if str_cov == 'se':
dict_hyps['signal'] = hyps[1]
list_lengthscales = []
for ind_elem in range(2, len(hyps)):
list_lengthscales.append(hyps[ind_elem])
dict_hyps['lengthscales'] = np.array(list_lengthscales)
else:
raise ValueError('kernel is inappropriate.')
return dict_hyps
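# Hedged round-trip sketch (illustrative, not part of the original module):
# convert_hyps flattens a hyps dict to [noise, signal, *lengthscales] and
# restore_hyps inverts it, so the composition is the identity on 'se' hyps.
def _check_hyps_round_trip():
    hyps = get_hyps('se', num_dim=3)
    restored = restore_hyps('se', convert_hyps('se', hyps))
    assert restored['noise'] == hyps['noise']
    assert restored['signal'] == hyps['signal']
    assert np.array_equal(restored['lengthscales'], hyps['lengthscales'])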
def get_prior_mu(prior_mu, X):
if prior_mu is None:
prior_mu_X = np.zeros((X.shape[0], 1))
else:
prior_mu_X = prior_mu(X)
return prior_mu_X
def get_kernels(X_train, hyps, str_cov):
    cov_X_X = cov_main(str_cov, X_train, X_train, hyps) + hyps['noise']**2 * np.eye(X_train.shape[0])
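# Hedged usage sketch (toy data, not part of the original module): build the
# default SE hyperparameters and form the train-train kernel matrix; equal
# input sets get the diagonal jitter added inside cov_main.
if __name__ == '__main__':
    X_demo = np.random.randn(5, 2)
    hyps_demo = get_hyps('se', num_dim=2)
    K = cov_main('se', X_demo, X_demo, hyps_demo)
    print(K.shape)  # (5, 5), symmetric, with 1e-5 jitter on the diagonal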
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
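# Hedged usage sketch (doctest-style; not part of the generated data): for the
# centrosymmetric group P -1, every reflection (h, k, l) is equivalent to
# (-h, -k, -l), and both phase factors are unity because the translations vanish.
# >>> hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
# ...     N.array([1, 2, 3]))
# >>> hkls
# array([[ 1,  2,  3],
#        [-1, -2, -3]])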
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
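# Editor's note: for centred settings such as I 21 21 21 above, the
# centring translation (1/2, 1/2, 1/2) is added to the base translations
# without reduction modulo 1, which is why components equal to 1 appear
# (e.g. trans_num/trans_den = (1/2, 1/2, 1)); they are equivalent to the
# same values wrapped into [0, 1).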
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
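# Editor's note: the ':2' suffix here (and on 'P b a n :2' and
# 'P m m n :2' below) marks the origin-choice-2 setting of the
# Hermann-Mauguin symbol, i.e. the setting with the inversion centre at
# the origin, consistent with the pure inversion (0,0,0) listed above.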
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
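# Note: for centred settings (C, I, F) the generated list repeats the
# point-group operations once per centring vector, e.g. F-centred groups
# add (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) to every base operation.
# The resulting translations are left un-reduced, so components equal to
# 1 or 5/4 are equivalent to 0 or 1/4 modulo one lattice translation.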
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
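# Sketch of how one transformation tuple can be applied (an assumed helper,
# not part of the generated table; it presumes N is numpy or exposes a
# numpy-compatible dot/asarray interface):
def _apply_transformation(transformation, point):
    """Map a fractional coordinate through one symmetry operation."""
    rot, trans_num, trans_den = transformation
    # rot is a 3x3 integer matrix; the translation part is the
    # element-wise quotient trans_num / trans_den.
    return N.dot(rot, point) + N.asarray(trans_num, dtype=float) / trans_den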
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
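# Usage sketch: the images of a fractional position under 'I 4/m', wrapped
# back into the unit cell; relies on the assumed helper _apply_transformation
# above and on `transformations` still holding the list just registered.
#   pos = N.array([0.1, 0.2, 0.3])
#   orbit = [_apply_transformation(t, pos) % 1.0 for t in transformations]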
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
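# Space group 89: P 4 2 2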
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
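# Space group 90: P 4 21 2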
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
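# Space group 91: P 41 2 2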
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
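# Space group 92: P 41 21 2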
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
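# Space group 93: P 42 2 2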
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
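# Space group 94: P 42 21 2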
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
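# Space group 95: P 43 2 2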
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
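# Space group 96: P 43 21 2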
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
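# Space group 97: I 4 2 2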
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
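# Space group 98: I 41 2 2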
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
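# Space group 99: P 4 m m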
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
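# Space group 100: P 4 b m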
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
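# Space group 101: P 42 c m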
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
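# Space group 102: P 42 n m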
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
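# Space group 103: P 4 c c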
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
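# Space group 104: P 4 n c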
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
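# Space group 105: P 42 m c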
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
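# Space group 106: P 42 b c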
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
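# Space group 107: I 4 m m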
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
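# Space group 108: I 4 c m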
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
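# Space group 109: I 41 m d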
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
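# Space group 110: I 41 c d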
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
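# Space group 111: P -4 2 m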
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
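# Space group 112: P -4 2 c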
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
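# Space group 113: P -4 21 m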
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
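# Space group 114: P -4 21 c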
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
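# Space group 115: P -4 m 2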
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
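# Space group 116: P -4 c 2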
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
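# Space group 117: P -4 b 2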
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
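# Space group 118: P -4 n 2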
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
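# Space group 119: I -4 m 2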
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
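# Space group 120: I -4 c 2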
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
import numpy as np
import matplotlib.patches
# https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html
class Arrow3D(matplotlib.patches.FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
matplotlib.patches.FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
    def draw(self, renderer):
        # Project the stored 3D endpoints into 2D display coordinates, then
        # let FancyArrowPatch draw the resulting 2D arrow.
        from mpl_toolkits.mplot3d import proj3d
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        matplotlib.patches.FancyArrowPatch.draw(self, renderer)
def pathpatch_2d_to_3d_affine(pathpatch, mat_rot=np.array([[1,0,0],[0,1,0],[0,0,1]]), vec_trans=np.array([0,0,0])):
"""
    Transforms a 2D Patch to a 3D patch using the affine transform
of the given rotation matrix and translation vector.
The pathpatch is assumed to be on the plane Z = 0.
"""
import mpl_toolkits.mplot3d.art3d as art3d
path = pathpatch.get_path() #Get the path and the associated transform
trans = pathpatch.get_patch_transform()
path = trans.transform_path(path) #Apply the transform
pathpatch.__class__ = art3d.PathPatch3D #Change the class
pathpatch._code3d = path.codes #Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor() #Get the face color
verts = path.vertices #Get the vertices in 2D
M = np.array([
[mat_rot[0, 0], mat_rot[0, 1], mat_rot[0, 2], vec_trans[0]],
[mat_rot[1, 0], mat_rot[1, 1], mat_rot[1, 2], vec_trans[1]],
[mat_rot[2, 0], mat_rot[2, 1], mat_rot[2, 2], vec_trans[2]],
])
pathpatch._segment3d = np.array([np.dot(M, (x, y, 0, 1)) for x, y in verts])
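# Minimal usage sketch (assumed, not in the original file): place a unit circle
# in 3D with pathpatch_2d_to_3d_affine, rotated 90 degrees about the X axis and
# lifted one unit along Z, together with an Arrow3D. The rotation matrix and
# limits are illustrative choices.
def _demo_affine_patch():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    circ = matplotlib.patches.Circle((0, 0), 1.0, fill=False)
    ax.add_patch(circ)
    rot_x90 = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])  # rotate 90 deg about X
    pathpatch_2d_to_3d_affine(circ, mat_rot=rot_x90, vec_trans=np.array([0, 0, 1.0]))
    arrow = Arrow3D([0, 1], [0, 0], [0, 1], mutation_scale=10, arrowstyle="-|>", color="r")
    ax.add_artist(arrow)
    ax.set_xlim3d(-2, 2)
    ax.set_ylim3d(-2, 2)
    ax.set_zlim3d(-2, 2)
    plt.show()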
def plot_geometry(geo, angle=0):
"""
Plots the given geometry.
"""
import mpl_toolkits.mplot3d.art3d as art3d
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(6,6))
    fig.suptitle('Cone Beam Computed Tomography geometry')
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Current CBCT geometry, in scale')
limXY = max(geo.DSO, geo.DSD-geo.DSO)
limZ = geo.sVoxel[0]
ax.set_box_aspect((limXY,limXY,limZ))
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_xlim3d(-limXY*1.2, limXY*1.2)
ax.set_ylim3d(-limXY*1.2, limXY*1.2)
ax.set_zlim3d(-limZ *1.2, limZ *1.2)
# Trajectory of Source
# https://matplotlib.org/devdocs/api/_as_gen/matplotlib.patches.Circle.htm
circ = matplotlib.patches.Circle((0,0), geo.DSO, color='black', fill=False, ls='-.', lw=0.5)
ax.add_patch(circ)
art3d.pathpatch_2d_to_3d(circ, z=0, zdir="z")
# Trajectory of Detector
circ = matplotlib.patches.Circle((0,0), geo.DSD-geo.DSO, color='black', fill=False, ls='-.', lw=0.5)
ax.add_patch(circ)
art3d.pathpatch_2d_to_3d(circ, z=0, zdir="z")
# Source
# ax.scatter([0], [0], [0], color="g", s=100)
sourcePos3D = [geo.DSO*np.cos(angle), geo.DSO*np.sin(angle), 0] # xyz
ax.scatter([sourcePos3D[0]], [sourcePos3D[1]], [sourcePos3D[2]], color="steelblue", s=100)
# Axes XYZ
length_axis = geo.sVoxel[0]
x_axis = Arrow3D([0, length_axis], [0, 0], [0, 0], mutation_scale=10, shrinkA=0, lw=1, arrowstyle="-|>", color="r")
y_axis = Arrow3D([0, 0], [0, length_axis], [0, 0], mutation_scale=10, shrinkA=0, lw=1, arrowstyle="-|>", color="b")
z_axis = Arrow3D([0, 0], [0, 0], [0, length_axis], mutation_scale=10, shrinkA=0, lw=1, arrowstyle="-|>", color="g")
ax.add_artist(x_axis)
ax.add_artist(y_axis)
ax.add_artist(z_axis)
# Detector
print("sDetector = {}".format(geo.sDetector))
alpha_detector = 0.7
    detectorPos3D = [
        (geo.DSD-geo.DSO)*np.cos(angle+np.pi),
        (geo.DSD-geo.DSO)*np.sin(angle+np.pi),
        0,
    ]  # xyz, opposite the source; the sin/zero components are an assumed completion
import random
import matplotlib.pyplot as plt
import utils.utilities as ut
import os
from sklearn.model_selection import train_test_split
import numpy as np
import network
import torch
import torchvision
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('data/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('data/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
def show_torch_image(torch_tensor):
plt.imshow(torch_tensor.numpy().reshape(28, 28), cmap='gray')
plt.show()
return None
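# Quick usage sketch (assumed, not in the original script): pull one batch from
# the test loader and display the first digit with the helper above.
def preview_test_batch():
    batch_idx, (example_data, example_targets) = next(enumerate(test_loader))
    print('first target in batch:', example_targets[0].item())
    show_torch_image(example_data[0])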
def initialize(trn_x, val_x, trn_y, val_y):
trn_x_torch = torch.from_numpy(trn_x).type(torch.FloatTensor)
trn_y_torch = torch.from_numpy(trn_y)
val_x_torch = torch.from_numpy(val_x).type(torch.FloatTensor)
val_y_torch = torch.from_numpy(val_y)
trn = torch.utils.data.TensorDataset(trn_x_torch,trn_y_torch)
val = torch.utils.data.TensorDataset(val_x_torch,val_y_torch)
trn_dataloader = torch.utils.data.DataLoader(trn,batch_size=100,shuffle=False, num_workers=4)
val_dataloader = torch.utils.data.DataLoader(val,batch_size=100,shuffle=False, num_workers=4)
return trn_x_torch, val_x_torch, trn_dataloader, val_dataloader
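# Illustrative wiring (assumed, not in the original script): build the torch
# dataloaders from a synthetic flat dataset via the helper above, using the
# train_test_split import from the top of the file.
def demo_initialize():
    features = np.random.rand(500, 784).astype(np.float32)
    labels = np.random.randint(0, 10, size=500)
    trn_x, val_x, trn_y, val_y = train_test_split(
        features, labels, test_size=0.2, random_state=random_seed)
    return initialize(trn_x, val_x, trn_y, val_y)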
# Set file paths based on added MNIST Datasets
input_path = 'data'
training_images_filepath = os.path.join(input_path, 'train-images-idx3-ubyte')
training_labels_filepath = os.path.join(input_path, 'train-labels-idx1-ubyte')
test_images_filepath = os.path.join(input_path, 't10k-images-idx3-ubyte')
test_labels_filepath = os.path.join(input_path, 't10k-labels-idx1-ubyte')
#print(training_images_filepath)
# Helper function to show a list of images with their relating titles
def show_images(images, title_texts):
cols = 5
rows = int(len(images)/cols) + 1
plt.figure(figsize=(30,20))
index = 1
for x in zip(images, title_texts):
image = x[0]
title_text = x[1]
plt.subplot(rows, cols, index)
plt.imshow(image, cmap=plt.cm.gray)
if (title_text != ''):
            plt.title(title_text, fontsize=15)
index += 1
plt.show()
#training the model
def train(epoch):
AE.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = AE(data)
loss = torch.nn.functional.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset),100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append((batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
            torch.save(AE.state_dict(), 'models/model.pth')  # save the model being trained (AE), not the imported `network` module
torch.save(optimizer.state_dict(), 'models/optimizer.pth')
#testing the model
def test():
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in val_dataloader:
output = network(data)
            test_loss += torch.nn.functional.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(val_dataloader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(val_dataloader.dataset), 100. * correct / len(val_dataloader.dataset)))
###########################################################################
###########################################################################
###########################################################################
# Load MINST dataset
mnist_dataloader = ut.MnistDataloader(training_images_filepath, training_labels_filepath, test_images_filepath, test_labels_filepath)
(x_train, y_train), (x_test, y_test) = mnist_dataloader.load_data()
x_train = np.array(x_train)
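# Assumed continuation (the original script is truncated here): the remaining
# splits would plausibly be converted the same way before use.
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)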
# Implementation of various helper functions
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
from scipy.integrate import quad
from scipy.special import kve
import sys
# Electron rest energy in eV
mc2 = 0.51099895000e6
def averagedIonizationCrossSection(T, C, DI_eV, betaStar):
"""
"""
c = 299792458.0
nT = T.size
    I_i = np.zeros(nT)
#!/usr/bin/env python
#####################
# Simple MD program #
#####################
import time
import numpy as np
def create_molecule(n=3, element='He'):
"""
Create a molecule as atoms in a cube.
Parameters
----------
n: integer
number of atoms in each dimension of the cube
element: string
The element of all atoms in this molecule
Returns
-------
coords: numpy.ndarray of shape (n**3, 3)
Numpy array of atomic coordinates
elems: list of strings
List of elements for all atoms
"""
coords = np.array([[x,y,z] for x in range(n) for y in range(n) for z in range(n)], dtype=float)
elems = [element] * len(coords)
return coords, elems
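# Illustrative self-check (assumed, not in the original script): a 2x2x2 cube
# should yield 8 helium atoms at integer lattice positions.
def _check_create_molecule():
    coords, elems = create_molecule(n=2)
    assert coords.shape == (8, 3)
    assert elems == ['He'] * 8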
def ref_LJ_force(coords, epsilon=1.0, sigma=1.0):
""" Compute the LJ force for the molecule in current geometry.
Parameters
----------
coords: Numpy.ndarray of shape (Natoms, 3)
Numpy array of atomic coordinates
epsilon: float
epsilon parameter in LJ force formula
sigma: float
sigma parameter in LJ force formula
Returns
-------
force: Numpy.ndarray of shape (Natoms, 3)
Numpy array of gradients on each atom
Reference
---------
The LJ force takes the formula:
Fij = (-12 x sigma^12 / rij^14 + 6 x sigma^6 / rij^8) * 4 * epsilon
"""
noa = len(coords)
s6 = sigma**6
forces = np.zeros((noa,3))
for i in range(noa):
for j in range(i+1,noa):
dc = coords[i] - coords[j]
r2 = dc[0]*dc[0] + dc[1]*dc[1] + dc[2]*dc[2]
f = (-12 / r2**7 * s6 + 6 / r2**4) * 4 * epsilon * s6
forces[i] += f * dc
forces[j] -= f * dc
return forces
def numpy_LJ_force(coords, epsilon=1.0, sigma=1.0):
""" Compute the LJ force for the molecule in current geometry.
Parameters
----------
coords: Numpy.ndarray of shape (Natoms, 3)
Numpy array of atomic coordinates
epsilon: float
epsilon parameter in LJ force formula
sigma: float
sigma parameter in LJ force formula
Returns
-------
force: Numpy.ndarray of shape (Natoms, 3)
Numpy array of gradients on each atom
Reference
---------
The LJ force takes the formula:
Fij = (-12 x sigma^12 / rij^14 + 6 x sigma^6 / rij^8) * 4 * epsilon
"""
# compute the distance between each atom pairs
c_diff = coords[:,np.newaxis,:] - coords[np.newaxis,:,:]
r2_mat = np.sum(np.square(c_diff), axis=-1)
# prepare values for the LJ force formula
s6 = sigma**6
r2_mat2 = np.square(r2_mat)
    np.fill_diagonal(r2_mat2, 1.0)
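    # Assumed completion: the original file is truncated at this point. It
    # vectorizes the same per-pair formula used in ref_LJ_force and sums the
    # pair forces over neighbors j for each atom i.
    np.fill_diagonal(r2_mat, 1.0)  # avoid division by zero on the diagonal
    f_mat = (-12.0 * s6 / (r2_mat2**3 * r2_mat) + 6.0 / r2_mat2**2) * 4 * epsilon * s6
    np.fill_diagonal(f_mat, 0.0)   # an atom exerts no force on itself
    forces = np.sum(f_mat[:, :, np.newaxis] * c_diff, axis=1)
    return forces

def _check_numpy_LJ_force():
    """Illustrative cross-check (assumed, not in the original file): the
    vectorized forces should match the reference double-loop implementation."""
    coords, _ = create_molecule(n=2)
    assert np.allclose(ref_LJ_force(coords), numpy_LJ_force(coords))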
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 27 15:03:13 2019
@author: Deng
"""
import numpy as np
#--------------------------- load feature -----------------#
# load feature
gf_1 = np.load('./data/gf_1.npy')
gf_1_n = np.linalg.norm(gf_1, axis=1, keepdims=True)
gf_1 = gf_1 / gf_1_n
qf_1 = np.load('./data/qf_1.npy')
qf_1_n = np.linalg.norm(qf_1, axis=1, keepdims=True)
qf_1 = qf_1 / qf_1_n
# model 2
gf_2 = np.load('./data/gf_2.npy')
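# Assumed continuation (the original script is truncated here): model-2
# features would plausibly be L2-normalized the same way as model 1; the
# qf_2 path below is hypothetical, mirroring qf_1.
gf_2_n = np.linalg.norm(gf_2, axis=1, keepdims=True)
gf_2 = gf_2 / gf_2_n
qf_2 = np.load('./data/qf_2.npy')
qf_2_n = np.linalg.norm(qf_2, axis=1, keepdims=True)
qf_2 = qf_2 / qf_2_n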
import logging
import random
import urllib
from enum import Enum, auto
from typing import Any, List
from zipfile import ZipFile
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from diskcache import Cache
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from ca.difficulty.difficulty_estimators import (
BertDifficultyEstimator, FleschKincaidDifficultyEstimator,
MaskedLanguageModelScorer, UncertaintyDifficultyEstimator)
from ca.featurizer import CachedSentenceTransformer, SentenceFeature
from ca.models.neuralnets.lstm_tagger import LSTMTagger
from ca.models.neuralnets.mlp_classifier import MLPClassifier
from ca.paths import PATH_CACHE, PATH_CACHE_GLOVE
class StrategyType(Enum):
RANDOM = auto()
SENTENCE_LENGTH = auto()
FLESCH_KINCAID_GRADE_LEVEL = auto()
FLESCH_KINCAID_READING_EASE = auto()
BERT_PREDICTION_DIFFICULTY = auto()
MASKED_LANGUAGE_MODEL_SCORE = auto()
ANNOTATION_TIME = auto()
MODEL_DIFFICULTY = auto()
class Strategy:
def __init__(self):
self._episode = 0
def init_tagging(
self,
X_so_far: List[List[List[str]]],
Xf_so_far: List[Any],
y_so_far: List[List[List[str]]],
X_unchosen: List[List[List[str]]],
Xf_unchosen: List[Any],
X_eval: List[List[str]],
Xf_eval: List[Any],
):
pass
def init_document_classification(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
pass
def init_pairwise(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
pass
def select(self) -> int:
raise NotImplementedError()
def argsort_unchosen(self, chunk_size: int) -> List[int]:
raise NotImplementedError()
def argsort_eval(self) -> np.ndarray:
raise NotImplementedError()
@property
def name(self) -> str:
return self.__class__.__name__
class RandomStrategy(Strategy):
def __init__(self):
super().__init__()
self._X_unchosen = None
self._Xf_unchosen = None
self._Xf_eval = None
def init_tagging(
self,
X_so_far: List[List[List[str]]],
Xf_so_far: List[Any],
y_so_far: List[List[List[str]]],
X_unchosen: List[List[List[str]]],
Xf_unchosen: List[Any],
X_eval: List[List[str]],
Xf_eval: List[Any],
):
self._X_unchosen = X_unchosen
self._Xf_unchosen = Xf_unchosen
self._Xf_eval = Xf_eval
def init_document_classification(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
self._X_unchosen = X_unchosen
self._Xf_unchosen = Xf_unchosen
self._Xf_eval = Xf_eval
def init_pairwise(
self,
X_so_far: List[str],
Xf_so_far: List[np.ndarray],
y_so_far: List[List[str]],
X_unchosen: List[str],
Xf_unchosen: List[np.ndarray],
X_eval: List[str],
Xf_eval: List[np.ndarray],
):
self._X_unchosen = X_unchosen
self._Xf_unchosen = Xf_unchosen
self._Xf_eval = Xf_eval
def select(self) -> int:
return random.randrange(len(self._X_unchosen))
def argsort_unchosen(self, chunk_size: int) -> List[int]:
assert len(self._X_unchosen) == len(self._Xf_unchosen)
size = len(self._X_unchosen)
idx = list(range(size))
random.shuffle(idx)
return idx
def argsort_eval(self) -> np.ndarray:
size = len(self._Xf_eval)
idx = np.arange(size)
        np.random.shuffle(idx)
        return idx
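# Minimal usage sketch (assumed, not in the original module): the random
# strategy only needs the unchosen pool before select()/argsort_unchosen()
# can drive an annotation loop; the toy sentences and features below are
# illustrative.
def _demo_random_strategy():
    strategy = RandomStrategy()
    X_unchosen = ["sent a", "sent b", "sent c"]
    Xf_unchosen = [np.zeros(4) for _ in X_unchosen]
    strategy.init_document_classification([], [], [], X_unchosen, Xf_unchosen, [], [np.zeros(4)])
    picked = strategy.select()                       # random index into the pool
    order = strategy.argsort_unchosen(chunk_size=2)  # random permutation of indices
    return picked, order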
"""
Tests for MLP Regressor
"""
import sys
from unittest import mock
import numpy as np
import pytest
from sklearn.utils.testing import \
assert_equal, assert_array_almost_equal
import scipy.sparse as sp
from scipy.stats import pearsonr
from sklearn.datasets import load_diabetes, make_regression
from sklearn.utils.estimator_checks import check_estimator
from tensorflow import nn
from muffnn import MLPRegressor
from muffnn.mlp.tests.util import assert_sample_weights_work
# The default kwargs don't work for tiny datasets like those in these tests.
KWARGS = {"random_state": 0, "n_epochs": 1000, "batch_size": 1,
"hidden_units": ()}
# toy dataset where Y = x[0] -2 * x[1] + 2 + err
X = np.array([[-1, 0], [-2, 1], [1, 1], [2, 0], [-2, 0], [0, 2]],
dtype=np.float32)
X_sp = sp.csr_matrix(X)
Y = X[:, 0] - 2 * X[:, 1] + 2 + \
np.random.RandomState(42).randn(X.shape[0]) * 0.01
def check_predictions(est, X, y):
"""Check that the model is able to fit the regression training data.
based on
https://github.com/scikit-learn/scikit-learn/blob/af171b84bd3fb82eed4569aa0d1f976264ffae84/sklearn/linear_model/tests/test_logistic.py#L38
"""
n_samples = len(y)
preds = est.fit(X, y).predict(X)
assert_equal(preds.shape, (n_samples,))
assert_array_almost_equal(preds, y, decimal=1)
def test_sample_weight():
"""Ensure we handle sample weights for regression problems."""
assert_sample_weights_work(
make_regression,
{'n_samples': 3000},
lambda: MLPRegressor(n_epochs=30, random_state=42,
keep_prob=0.8, hidden_units=(128,))
)
# Make a subclass that has no `solver` parameter. The scikit-learn
# `check_estimator` has a check which fails with a class as a default.
class MLPRegressorFewerParams(MLPRegressor):
def __init__(self, hidden_units=(256,), batch_size=64, n_epochs=5,
keep_prob=1.0, activation=nn.relu,
random_state=None):
super(MLPRegressorFewerParams, self).__init__(
hidden_units=hidden_units, batch_size=batch_size,
n_epochs=n_epochs, keep_prob=keep_prob,
activation=activation,
random_state=random_state)
def test_check_estimator():
"""Check adherence to Estimator API."""
if sys.version_info.major == 3 and sys.version_info.minor == 7:
# Starting in Tensorflow 1.14 and Python 3.7, there's one module
# with a `0` in the __warningregistry__. Scikit-learn tries to clear
# this dictionary in its tests.
name = 'tensorboard.compat.tensorflow_stub.pywrap_tensorflow'
with mock.patch.object(sys.modules[name], '__warningregistry__', {}):
check_estimator(MLPRegressorFewerParams)
else:
check_estimator(MLPRegressorFewerParams)
def test_predict():
"""Test binary classification."""
check_predictions(MLPRegressor(**KWARGS), X, Y)
check_predictions(MLPRegressor(**KWARGS), X_sp, Y)
def test_replicability():
"""Make sure running fit twice in a row finds the same parameters."""
diabetes = load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
clf = MLPRegressor(keep_prob=0.9, random_state=42, n_epochs=100)
target = y_diabetes
# Just predict on the training set, for simplicity.
pred1 = clf.fit(X_diabetes, target).predict(X_diabetes)
pred2 = clf.fit(X_diabetes, target).predict(X_diabetes)
assert_array_almost_equal(pred1, pred2)
def test_partial_fit():
data = load_diabetes()
clf = MLPRegressor(n_epochs=1)
X, y = data['data'], data['target']
for _ in range(30):
clf.partial_fit(X, y)
y_pred = clf.predict(X)
assert pearsonr(y_pred, y)[0] > 0.5
def test_embedding_default():
# Make sure the embedding works by default.
data = load_diabetes()
X, y = data['data'], data['target']
clf = MLPRegressor(n_epochs=1)
clf.fit(X, y)
assert clf.transform(X).shape[1] == 256
def test_embedding_no_layers():
# Make sure the embedding works with no layers.
data = load_diabetes()
X, y = data['data'], data['target']
clf = MLPRegressor(n_epochs=1, hidden_units=[])
clf.fit(X, y)
assert clf.transform(X).shape[1] == 1
def test_embedding_specific_layer():
# Make sure the embedding works with no layers.
data = load_diabetes()
X, y = data['data'], data['target']
clf = MLPRegressor(
n_epochs=1,
hidden_units=(256, 8, 256),
transform_layer_index=1)
clf.fit(X, y)
assert clf.transform(X).shape[1] == 8
def test_prediction_gradient():
"""Test computation of prediction gradients."""
mlp = MLPRegressor(n_epochs=100, random_state=42, hidden_units=(5,))
X, y = make_regression(
n_samples=1000, n_features=10, n_informative=1, shuffle=False)
mlp.fit(X, y)
grad = mlp.prediction_gradient(X)
grad_means = grad.mean(axis=0)
assert grad.shape == X.shape
# Check that only the informative feature has a large gradient.
    assert np.abs(grad_means[0]) > np.abs(grad_means[1:]).max()  # assumed comparison: the informative feature dominates
"""
Tests speeds of different functions that simultaneously return the min and max of a numpy array.
Copied from: https://stackoverflow.com/questions/12200580/numpy-function-for-simultaneous-max-and-min
Results show that we can just use normal numpy np.min() and np.max() and it's not too much slower
"""
import numpy as np
from moredataframes.mdfutils import check_for_numba
from speedtester import speedtest
def _numba_while(arr):
n = arr.size
odd = n % 2
if not odd:
n -= 1
max_val = min_val = arr[0]
i = 1
while i < n:
x = arr[i]
y = arr[i + 1]
if x > y:
x, y = y, x
min_val = min(x, min_val)
max_val = max(y, max_val)
i += 2
if not odd:
x = arr[n]
min_val = min(x, min_val)
max_val = max(x, max_val)
return min_val, max_val
def _numba_loop(arr):
n = arr.size
max_val = min_val = arr[0]
for i in range(1, n):
item = arr[i]
if item > max_val:
max_val = item
elif item < min_val:
min_val = item
return min_val, max_val
def numpy_min_max(arr):
    return np.min(arr), np.max(arr)
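# Illustrative sanity check (assumed harness, not from the original file): all
# variants must agree before any timing comparison is meaningful.
if __name__ == "__main__":
    arr = np.random.rand(10**6)
    expected = numpy_min_max(arr)
    assert _numba_while(arr) == expected
    assert _numba_loop(arr) == expected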
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
import pytest
from _pytest.outcomes import Skipped
import os
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData, UVCal, utils as uvutils
from pyuvdata.data import DATA_PATH
from pyuvdata import UVFlag
from ..uvflag import lst_from_uv, flags2waterfall, and_rows_cols
from pyuvdata import __version__
import shutil
import copy
import warnings
import h5py
import pathlib
test_d_file = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5")
test_c_file = os.path.join(DATA_PATH, "zen.2457555.42443.HH.uvcA.omni.calfits")
test_f_file = test_d_file.rstrip(".uvh5") + ".testuvflag.h5"
pyuvdata_version_str = " Read/written with pyuvdata version: " + __version__ + "."
pytestmark = pytest.mark.filterwarnings(
"ignore:telescope_location is not set. Using known values for HERA.",
"ignore:antenna_positions is not set. Using known values for HERA.",
)
@pytest.fixture(scope="session")
def uvdata_obj_main():
uvdata_object = UVData()
uvdata_object.read(test_d_file)
yield uvdata_object
# cleanup
del uvdata_object
return
@pytest.fixture(scope="function")
def uvdata_obj(uvdata_obj_main):
uvdata_object = uvdata_obj_main.copy()
yield uvdata_object
# cleanup
del uvdata_object
return
# The following three fixtures are used regularly
# to initizize UVFlag objects from standard files
# We need to define these here in order to set up
# some skips for developers who do not have `pytest-cases` installed
@pytest.fixture(scope="function")
def uvf_from_data(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj)
# yield the object for the test
yield uvf
# do some cleanup
del (uvf, uvdata_obj)
@pytest.fixture(scope="function")
def uvf_from_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag()
uvf.from_uvcal(uvc)
# the antenna type test file is large, so downselect to speed up
if uvf.type == "antenna":
uvf.select(antenna_nums=uvf.ant_array[:5])
# yield the object for the test
yield uvf
# do some cleanup
del (uvf, uvc)
@pytest.fixture(scope="function")
def uvf_from_waterfall(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj, waterfall=True)
# yield the object for the test
yield uvf
# do some cleanup
del uvf
# Try to import `pytest-cases` and define decorators used to
# iterate over the three main types of UVFlag objects
# otherwise make the decorators skip the tests that use these iterators
try:
pytest_cases = pytest.importorskip("pytest_cases", minversion="1.12.1")
cases_decorator = pytest_cases.parametrize(
"input_uvf",
[
pytest_cases.fixture_ref(uvf_from_data),
pytest_cases.fixture_ref(uvf_from_uvcal),
pytest_cases.fixture_ref(uvf_from_waterfall),
],
)
cases_decorator_no_waterfall = pytest_cases.parametrize(
"input_uvf",
[
pytest_cases.fixture_ref(uvf_from_data),
pytest_cases.fixture_ref(uvf_from_uvcal),
],
)
# This warning is raised by pytest_cases
# It is due to a feature the developer does
# not know how to handle yet. ignore for now.
warnings.filterwarnings(
"ignore",
message="WARNING the new order is not" + " taken into account !!",
append=True,
)
except Skipped:
cases_decorator = pytest.mark.skipif(
True, reason="pytest-cases not installed or not required version"
)
cases_decorator_no_waterfall = pytest.mark.skipif(
True, reason="pytest-cases not installed or not required version"
)
@pytest.fixture()
def test_outfile(tmp_path):
yield str(tmp_path / "outtest_uvflag.h5")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_check_flag_array(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj, mode="flag")
uvf.flag_array = np.ones((uvf.flag_array.shape), dtype=int)
with pytest.raises(
ValueError, match="UVParameter _flag_array is not the appropriate type.",
):
uvf.check()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_bad_mode(uvdata_obj):
uv = uvdata_obj
with pytest.raises(ValueError) as cm:
UVFlag(uv, mode="bad_mode", history="I made a UVFlag object", label="test")
assert str(cm.value).startswith("Input mode must be within acceptable")
uv = UVCal()
uv.read_calfits(test_c_file)
with pytest.raises(ValueError) as cm:
UVFlag(uv, mode="bad_mode", history="I made a UVFlag object", label="test")
assert str(cm.value).startswith("Input mode must be within acceptable")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert uvf.metric_array.shape == uv.flag_array.shape
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == uv.flag_array.shape
assert np.all(uvf.weights_array == 1)
assert uvf.type == "baseline"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert "I made a UVFlag object" in uvf.history
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
assert uvf.label == "test"
assert uvf.filename == uv.filename
def test_add_extra_keywords(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
uvf.extra_keywords = {"keyword1": 1, "keyword2": 2}
assert "keyword1" in uvf.extra_keywords
assert "keyword2" in uvf.extra_keywords
uvf.extra_keywords["keyword3"] = 3
assert "keyword3" in uvf.extra_keywords
assert uvf.extra_keywords.get("keyword1") == 1
assert uvf.extra_keywords.get("keyword2") == 2
assert uvf.extra_keywords.get("keyword3") == 3
def test_read_extra_keywords(uvdata_obj):
uv = uvdata_obj
uv.extra_keywords = {"keyword1": 1, "keyword2": 2}
assert "keyword1" in uv.extra_keywords
assert "keyword2" in uv.extra_keywords
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert "keyword1" in uvf.extra_keywords
assert "keyword2" in uvf.extra_keywords
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_x_orientation(uvdata_obj):
uv = uvdata_obj
uv.x_orientation = "east"
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert uvf.x_orientation == uv.x_orientation
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_uvdata_copy_flags(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
with uvtest.check_warnings(UserWarning, 'Copying flags to type=="baseline"'):
uvf = UVFlag(uv, copy_flags=True, mode="metric")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
if future_shapes:
assert np.array_equal(uvf.flag_array[:, 0, :, :], uv.flag_array)
else:
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.weights_array is None
assert uvf.type == "baseline"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
if future_shapes:
assert np.all(uvf.freq_array == uv.freq_array)
else:
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_mode_flag(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag()
uvf.from_uvdata(uv, copy_flags=False, mode="flag")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.weights_array is None
assert uvf.type == "baseline"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
assert uvf.metric_array.shape == uvc.flag_array.shape
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == uvc.flag_array.shape
assert np.all(uvf.weights_array == 1)
assert uvf.type == "antenna"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == uvc.time_array)
assert uvf.x_orientation == uvc.x_orientation
lst = lst_from_uv(uvc)
assert np.all(uvf.lst_array == lst)
assert np.all(uvf.freq_array == uvc.freq_array[0])
assert np.all(uvf.polarization_array == uvc.jones_array)
assert np.all(uvf.ant_array == uvc.ant_array)
assert 'Flag object with type "antenna"' in uvf.history
assert pyuvdata_version_str in uvf.history
assert uvf.filename == uvc.filename
def test_init_uvcal_mode_flag():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc, copy_flags=False, mode="flag")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uvc.flag_array)
assert uvf.weights_array is None
assert uvf.type == "antenna"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uvc.time_array)
lst = lst_from_uv(uvc)
assert np.all(uvf.lst_array == lst)
assert np.all(uvf.freq_array == uvc.freq_array[0])
assert np.all(uvf.polarization_array == uvc.jones_array)
assert np.all(uvf.ant_array == uvc.ant_array)
assert 'Flag object with type "antenna"' in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_cal_copy_flags():
uv = UVCal()
uv.read_calfits(test_c_file)
with uvtest.check_warnings(UserWarning, 'Copying flags to type=="antenna"'):
uvf = UVFlag(uv, copy_flags=True, mode="metric")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.type == "antenna"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.jones_array)
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_waterfall_uvd(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
uvf = UVFlag(uv, waterfall=True)
assert uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
assert np.all(uvf.weights_array == 1)
assert uvf.type == "waterfall"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.lst_array == np.unique(uv.lst_array))
if future_shapes:
assert np.all(uvf.freq_array == uv.freq_array)
else:
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_waterfall_uvc():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, waterfall=True, history="input history check")
assert uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones)
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones)
assert np.all(uvf.weights_array == 1)
assert uvf.type == "waterfall"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.jones_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert "input history check" in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_waterfall_flag_uvcal():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, waterfall=True, mode="flag")
assert uvf.flag_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones)
assert not np.any(uvf.flag_array)
assert uvf.weights_array is None
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.jones_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_waterfall_flag_uvdata(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, waterfall=True, mode="flag")
assert uvf.flag_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
assert not np.any(uvf.flag_array)
assert uvf.weights_array is None
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_waterfall_copy_flags(uvdata_obj):
uv = UVCal()
uv.read_calfits(test_c_file)
with pytest.raises(NotImplementedError) as cm:
UVFlag(uv, copy_flags=True, mode="flag", waterfall=True)
assert str(cm.value).startswith("Cannot copy flags when initializing")
uv = uvdata_obj
with pytest.raises(NotImplementedError) as cm:
UVFlag(uv, copy_flags=True, mode="flag", waterfall=True)
assert str(cm.value).startswith("Cannot copy flags when initializing")
def test_init_invalid_input():
# input is not UVData, UVCal, path, or list/tuple
with pytest.raises(ValueError) as cm:
UVFlag(14)
assert str(cm.value).startswith("input to UVFlag.__init__ must be one of:")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_from_uvcal_error(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag()
with pytest.raises(ValueError) as cm:
uvf.from_uvcal(uv)
assert str(cm.value).startswith("from_uvcal can only initialize a UVFlag object")
def test_from_udata_error():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag()
with pytest.raises(ValueError) as cm:
uvf.from_uvdata(uv)
assert str(cm.value).startswith("from_uvdata can only initialize a UVFlag object")
def test_init_list_files_weights(tmpdir):
# Test that weights are preserved when reading list of files
tmp_path = tmpdir.strpath
# Create two files to read
uvf = UVFlag(test_f_file)
np.random.seed(0)
wts1 = np.random.rand(*uvf.weights_array.shape)
uvf.weights_array = wts1.copy()
uvf.write(os.path.join(tmp_path, "test1.h5"))
wts2 = np.random.rand(*uvf.weights_array.shape)
uvf.weights_array = wts2.copy()
uvf.write(os.path.join(tmp_path, "test2.h5"))
uvf2 = UVFlag(
[os.path.join(tmp_path, "test1.h5"), os.path.join(tmp_path, "test2.h5")]
)
assert np.all(uvf2.weights_array == np.concatenate([wts1, wts2], axis=0))
def test_init_posix():
# Test that weights are preserved when reading list of files
testfile_posix = pathlib.Path(test_f_file)
uvf1 = UVFlag(test_f_file)
uvf2 = UVFlag(testfile_posix)
assert uvf1 == uvf2
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_data_like_property_mode_tamper(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.mode = "test"
with pytest.raises(ValueError) as cm:
list(uvf.data_like_parameters)
assert str(cm.value).startswith("Invalid mode. Mode must be one of")
def test_read_write_loop(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
assert uvf2.filename == [os.path.basename(test_outfile)]
def test_read_write_loop_with_optional_x_orientation(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.x_orientation = "east"
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_loop_waterfall(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.to_waterfall()
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_loop_ret_wt_sq(test_outfile):
uvf = UVFlag(test_f_file)
uvf.weights_array = 2 * np.ones_like(uvf.weights_array)
uvf.to_waterfall(return_weights_square=True)
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_bad_mode_savefile(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
# create the file so the clobber gets tested
with h5py.File(test_outfile, "w") as h5file:
h5file.create_dataset("Test", list(range(10)))
uvf.write(test_outfile, clobber=True)
# manually re-read and tamper with parameters
with h5py.File(test_outfile, "a") as h5:
mode = h5["Header/mode"]
mode[...] = np.string_("test")
with pytest.raises(ValueError) as cm:
uvf = UVFlag(test_outfile)
assert str(cm.value).startswith("File cannot be read. Received mode")
def test_bad_type_savefile(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.write(test_outfile, clobber=True)
# manually re-read and tamper with parameters
with h5py.File(test_outfile, "a") as h5:
mode = h5["Header/type"]
mode[...] = np.string_("test")
with pytest.raises(ValueError) as cm:
uvf = UVFlag(test_outfile)
assert str(cm.value).startswith("File cannot be read. Received type")
def test_write_add_version_str(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "r") as h5:
assert h5["Header/history"].dtype.type is np.string_
hist = h5["Header/history"][()].decode("utf8")
assert pyuvdata_version_str in hist
def test_read_add_version_str(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
assert pyuvdata_version_str in uvf.history
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "r") as h5:
hist = h5["Header/history"]
del hist
uvf2 = UVFlag(test_outfile)
assert pyuvdata_version_str in uvf2.history
assert uvf == uvf2
def test_read_write_ant(test_outfile):
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_missing_nants_data(test_outfile):
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "a") as h5:
del h5["Header/Nants_data"]
with uvtest.check_warnings(UserWarning, "Nants_data not available in file,"):
uvf2 = UVFlag(test_outfile)
# make sure this was set to None
assert uvf2.Nants_data == len(uvf2.ant_array)
uvf2.Nants_data = uvf.Nants_data
# verify no other elements were changed
assert uvf.__eq__(uvf2, check_history=True)
def test_read_missing_nspws(test_outfile):
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "a") as h5:
del h5["Header/Nspws"]
uvf2 = UVFlag(test_outfile)
# make sure Nspws was calculated
assert uvf2.Nspws == 1
# verify no other elements were changed
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_nocompress(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_nocompress_flag(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_extra_keywords(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.extra_keywords = {"keyword1": 1, "keyword2": "string"}
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
assert uvf2.extra_keywords["keyword1"] == 1
assert uvf2.extra_keywords["keyword2"] == "string"
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_list(uvdata_obj):
uv = uvdata_obj
uv.time_array -= 1
uvf = UVFlag([uv, test_f_file])
uvf1 = UVFlag(uv)
uvf2 = UVFlag(test_f_file)
assert np.array_equal(
np.concatenate((uvf1.metric_array, uvf2.metric_array), axis=0), uvf.metric_array
)
assert np.array_equal(
np.concatenate((uvf1.weights_array, uvf2.weights_array), axis=0),
uvf.weights_array,
)
assert np.array_equal(
np.concatenate((uvf1.time_array, uvf2.time_array)), uvf.time_array
)
assert np.array_equal(
np.concatenate((uvf1.baseline_array, uvf2.baseline_array)), uvf.baseline_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_1_array, uvf2.ant_1_array)), uvf.ant_1_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_2_array, uvf2.ant_2_array)), uvf.ant_2_array
)
assert uvf.mode == "metric"
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
def test_read_list(uvdata_obj, test_outfile):
uv = uvdata_obj
uv.time_array -= 1
uvf = UVFlag(uv)
uvf.write(test_outfile, clobber=True)
uvf.read([test_outfile, test_f_file])
assert uvf.filename == sorted(
os.path.basename(file) for file in [test_outfile, test_f_file]
)
uvf1 = UVFlag(uv)
uvf2 = UVFlag(test_f_file)
assert np.array_equal(
np.concatenate((uvf1.metric_array, uvf2.metric_array), axis=0), uvf.metric_array
)
assert np.array_equal(
np.concatenate((uvf1.weights_array, uvf2.weights_array), axis=0),
uvf.weights_array,
)
assert np.array_equal(
np.concatenate((uvf1.time_array, uvf2.time_array)), uvf.time_array
)
assert np.array_equal(
np.concatenate((uvf1.baseline_array, uvf2.baseline_array)), uvf.baseline_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_1_array, uvf2.ant_1_array)), uvf.ant_1_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_2_array, uvf2.ant_2_array)), uvf.ant_2_array
)
assert uvf.mode == "metric"
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
def test_read_error():
with pytest.raises(IOError) as cm:
UVFlag("foo")
assert str(cm.value).startswith("foo not found")
def test_read_change_type(test_outfile):
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.write(test_outfile, clobber=True)
assert hasattr(uvf, "ant_array")
uvf.read(test_f_file)
# clear sets these to None now
assert hasattr(uvf, "ant_array")
assert uvf.ant_array is None
assert hasattr(uvf, "baseline_array")
assert hasattr(uvf, "ant_1_array")
assert hasattr(uvf, "ant_2_array")
uvf.read(test_outfile)
assert hasattr(uvf, "ant_array")
assert hasattr(uvf, "baseline_array")
assert uvf.baseline_array is None
assert hasattr(uvf, "ant_1_array")
assert uvf.ant_1_array is None
assert hasattr(uvf, "ant_2_array")
assert uvf.ant_2_array is None
def test_read_change_mode(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, mode="flag")
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
uvf.write(test_outfile, clobber=True)
uvf.read(test_f_file)
assert hasattr(uvf, "metric_array")
assert hasattr(uvf, "flag_array")
assert uvf.flag_array is None
uvf.read(test_outfile)
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
def test_write_no_clobber():
uvf = UVFlag(test_f_file)
with pytest.raises(ValueError) as cm:
uvf.write(test_f_file)
assert str(cm.value).startswith("File " + test_f_file + " exists;")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_lst_from_uv(uvdata_obj):
uv = uvdata_obj
lst_array = lst_from_uv(uv)
assert np.allclose(uv.lst_array, lst_array)
def test_lst_from_uv_error():
with pytest.raises(ValueError) as cm:
lst_from_uv(4)
assert str(cm.value).startswith("Function lst_from_uv can only operate on")
def test_add():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
assert np.array_equal(
np.concatenate((uv1.time_array, uv2.time_array)), uv3.time_array
)
assert np.array_equal(
np.concatenate((uv1.baseline_array, uv2.baseline_array)), uv3.baseline_array
)
assert np.array_equal(
np.concatenate((uv1.ant_1_array, uv2.ant_1_array)), uv3.ant_1_array
)
assert np.array_equal(
np.concatenate((uv1.ant_2_array, uv2.ant_2_array)), uv3.ant_2_array
)
assert np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)), uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=0), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array,
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along time axis. " in uv3.history
def test_add_collapsed_pols():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf.collapse_pol()
uvf3 = uvf.copy()
uvf3.time_array += 1 # increment the time array
uvf4 = uvf + uvf3
assert uvf4.Ntimes == 2 * uvf.Ntimes
assert uvf4.check()
def test_add_add_version_str():
uv1 = UVFlag(test_f_file)
uv1.history = uv1.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uv1.history
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
assert pyuvdata_version_str in uv3.history
def test_add_baseline():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.baseline_array += 100 # Arbitrary
uv3 = uv1.__add__(uv2, axis="baseline")
assert np.array_equal(
np.concatenate((uv1.time_array, uv2.time_array)), uv3.time_array
)
assert np.array_equal(
np.concatenate((uv1.baseline_array, uv2.baseline_array)), uv3.baseline_array
)
assert np.array_equal(
np.concatenate((uv1.ant_1_array, uv2.ant_1_array)), uv3.ant_1_array
)
assert np.array_equal(
np.concatenate((uv1.ant_2_array, uv2.ant_2_array)), uv3.ant_2_array
)
assert np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)), uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=0), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array,
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along baseline axis. " in uv3.history
def test_add_antenna():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv1 = UVFlag(uvc)
uv2 = copy.deepcopy(uv1)
uv2.ant_array += 100 # Arbitrary
uv3 = uv1.__add__(uv2, axis="antenna")
assert np.array_equal(np.concatenate((uv1.ant_array, uv2.ant_array)), uv3.ant_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=0), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array,
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert np.array_equal(uv1.time_array, uv3.time_array)
assert np.array_equal(uv1.lst_array, uv3.lst_array)
assert uv3.type == "antenna"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along antenna axis. " in uv3.history
def test_add_frequency():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.freq_array += 1e4 # Arbitrary
uv3 = uv1.__add__(uv2, axis="frequency")
assert np.array_equal(
np.concatenate((uv1.freq_array, uv2.freq_array), axis=-1), uv3.freq_array
)
assert np.array_equal(uv1.time_array, uv3.time_array)
assert np.array_equal(uv1.baseline_array, uv3.baseline_array)
assert np.array_equal(uv1.ant_1_array, uv3.ant_1_array)
assert np.array_equal(uv1.ant_2_array, uv3.ant_2_array)
assert np.array_equal(uv1.lst_array, uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=2), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=2),
uv3.weights_array,
)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along frequency axis. " in uv3.history
def test_add_frequency_with_weights_square():
# Same test as above, just checking an optional parameter (also in waterfall mode)
uvf1 = UVFlag(test_f_file)
uvf1.weights_array = 2 * np.ones_like(uvf1.weights_array)
uvf1.to_waterfall(return_weights_square=True)
uvf2 = copy.deepcopy(uvf1)
uvf2.freq_array += 1e4
uvf3 = uvf1.__add__(uvf2, axis="frequency")
assert np.array_equal(
np.concatenate((uvf1.weights_square_array, uvf2.weights_square_array), axis=1),
uvf3.weights_square_array,
)
def test_add_frequency_mix_weights_square():
# Same test as above, checking some error handling
uvf1 = UVFlag(test_f_file)
uvf1.weights_array = 2 * np.ones_like(uvf1.weights_array)
uvf2 = copy.deepcopy(uvf1)
uvf1.to_waterfall(return_weights_square=True)
uvf2.to_waterfall(return_weights_square=False)
uvf2.freq_array += 1e4
with pytest.raises(
ValueError,
match="weights_square_array optional parameter is missing from second UVFlag",
):
uvf1.__add__(uvf2, axis="frequency", inplace=True)
def test_add_pol():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.polarization_array += 1 # Arbitrary
uv3 = uv1.__add__(uv2, axis="polarization")
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert np.array_equal(uv1.time_array, uv3.time_array)
assert np.array_equal(uv1.baseline_array, uv3.baseline_array)
assert np.array_equal(uv1.ant_1_array, uv3.ant_1_array)
assert np.array_equal(uv1.ant_2_array, uv3.ant_2_array)
assert np.array_equal(uv1.lst_array, uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=3), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=3),
uv3.weights_array,
)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(
np.concatenate((uv1.polarization_array, uv2.polarization_array)),
uv3.polarization_array,
)
assert "Data combined along polarization axis. " in uv3.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add_flag(uvdata_obj):
uv = uvdata_obj
uv1 = UVFlag(uv, mode="flag")
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
assert np.array_equal(
np.concatenate((uv1.time_array, uv2.time_array)), uv3.time_array
)
assert np.array_equal(
np.concatenate((uv1.baseline_array, uv2.baseline_array)), uv3.baseline_array
)
assert np.array_equal(
np.concatenate((uv1.ant_1_array, uv2.ant_1_array)), uv3.ant_1_array
)
assert np.array_equal(
np.concatenate((uv1.ant_2_array, uv2.ant_2_array)), uv3.ant_2_array
)
assert np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)), uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.flag_array, uv2.flag_array), axis=0), uv3.flag_array
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert uv3.type == "baseline"
assert uv3.mode == "flag"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along time axis. " in uv3.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add_errors(uvdata_obj):
uv = uvdata_obj
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv1 = UVFlag(uv)
# Mismatched classes
with pytest.raises(ValueError) as cm:
uv1.__add__(3)
assert str(cm.value).startswith(
"Only UVFlag objects can be added to a UVFlag object"
)
# Mismatched types
uv2 = UVFlag(uvc)
with pytest.raises(ValueError) as cm:
uv1.__add__(uv2)
assert str(cm.value).startswith("UVFlag object of type ")
# Mismatched modes
uv3 = UVFlag(uv, mode="flag")
with pytest.raises(ValueError) as cm:
uv1.__add__(uv3)
assert str(cm.value).startswith("UVFlag object of mode ")
# Invalid axes
with pytest.raises(ValueError) as cm:
uv1.__add__(uv1, axis="antenna")
assert str(cm.value).endswith("concatenated along antenna axis.")
with pytest.raises(ValueError) as cm:
uv2.__add__(uv2, axis="baseline")
assert str(cm.value).endswith("concatenated along baseline axis.")
def test_inplace_add():
uv1a = UVFlag(test_f_file)
uv1b = copy.deepcopy(uv1a)
uv2 = copy.deepcopy(uv1a)
uv2.time_array += 1
uv1a += uv2
assert uv1a.__eq__(uv1b + uv2)
def test_clear_unused_attributes():
uv = UVFlag(test_f_file)
assert hasattr(uv, "baseline_array")
assert hasattr(uv, "ant_1_array")
assert hasattr(uv, "ant_2_array")
assert hasattr(uv, "Nants_telescope")
uv._set_type_antenna()
uv.clear_unused_attributes()
# clear_unused_attributes now sets these to None
assert hasattr(uv, "baseline_array")
assert uv.baseline_array is None
assert hasattr(uv, "ant_1_array")
assert uv.ant_1_array is None
assert hasattr(uv, "ant_2_array")
assert uv.ant_2_array is None
assert hasattr(uv, "Nants_telescope")
assert uv.Nants_telescope is None
uv._set_mode_flag()
assert hasattr(uv, "metric_array")
uv.clear_unused_attributes()
assert hasattr(uv, "metric_array")
assert uv.metric_array is None
# Start over
uv = UVFlag(test_f_file)
uv.ant_array = np.array([4])
uv.flag_array = np.array([5])
uv.clear_unused_attributes()
assert hasattr(uv, "ant_array")
assert uv.ant_array is None
assert hasattr(uv, "flag_array")
assert uv.flag_array is None
def test_not_equal():
uvf1 = UVFlag(test_f_file)
# different class
assert not uvf1.__eq__(5)
# different mode
uvf2 = uvf1.copy()
uvf2.mode = "flag"
assert not uvf1.__eq__(uvf2)
# different type
uvf2 = uvf1.copy()
uvf2.type = "antenna"
assert not uvf1.__eq__(uvf2)
# array different
uvf2 = uvf1.copy()
uvf2.freq_array += 1
assert not uvf1.__eq__(uvf2)
# history different
uvf2 = uvf1.copy()
uvf2.history += "hello"
assert not uvf1.__eq__(uvf2, check_history=True)
def test_to_waterfall_bl():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
def test_to_waterfall_add_version_str():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_waterfall()
assert pyuvdata_version_str in uvf.history
def test_to_waterfall_bl_multi_pol():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy() # Keep a copy to run with keep_pol=False
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
assert len(uvf.polarization_array) == 2
# Repeat with keep_pol=False
uvf2.to_waterfall(keep_pol=False)
assert uvf2.type == "waterfall"
assert uvf2.metric_array.shape == (len(uvf2.time_array), len(uvf.freq_array), 1)
assert uvf2.weights_array.shape == uvf2.metric_array.shape
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
def test_to_waterfall_bl_ret_wt_sq():
uvf = UVFlag(test_f_file)
Nbls = uvf.Nbls
uvf.weights_array = 2 * np.ones_like(uvf.weights_array)
uvf.to_waterfall(return_weights_square=True)
assert np.all(uvf.weights_square_array == 4 * Nbls)
# Switch to flag and check that it is now set to None
uvf.to_flag()
assert uvf.weights_square_array is None
def test_collapse_pol(test_outfile):
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol()
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
assert uvf2.mode == "metric"
assert hasattr(uvf2, "metric_array")
assert hasattr(uvf2, "flag_array")
assert uvf2.flag_array is None
# test check passes just to be sure
assert uvf2.check()
# test writing it out and reading in to make sure polarization_array has
# correct type
uvf2.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "r") as h5:
assert h5["Header/polarization_array"].dtype.type is np.string_
uvf = UVFlag(test_outfile)
assert uvf._polarization_array.expected_type == str
assert uvf._polarization_array.acceptable_vals is None
assert uvf == uvf2
os.remove(test_outfile)
def test_collapse_pol_add_pol_axis():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol()
with pytest.raises(NotImplementedError) as cm:
uvf2.__add__(uvf2, axis="pol")
assert str(cm.value).startswith("Two UVFlag objects with their")
def test_collapse_pol_or():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert uvf.weights_array is None
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol(method="or")
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
assert uvf2.mode == "flag"
assert hasattr(uvf2, "flag_array")
assert hasattr(uvf2, "metric_array")
assert uvf2.metric_array is None
def test_collapse_pol_add_version_str():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf2 = uvf.copy()
uvf2.collapse_pol(method="or")
assert pyuvdata_version_str in uvf2.history
def test_collapse_single_pol():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
with uvtest.check_warnings(UserWarning, "Cannot collapse polarization"):
uvf.collapse_pol()
assert uvf == uvf2
def test_collapse_pol_flag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert uvf.weights_array is None
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol()
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
assert uvf2.mode == "metric"
assert hasattr(uvf2, "metric_array")
assert hasattr(uvf2, "flag_array")
assert uvf2.flag_array is None
def test_to_waterfall_bl_flags():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.mode == "metric"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
assert len(uvf.lst_array) == len(uvf.time_array)
def test_to_waterfall_bl_flags_or():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert uvf.weights_array is None
uvf.to_waterfall(method="or")
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert uvf.flag_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert len(uvf.lst_array) == len(uvf.time_array)
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.to_waterfall(method="or")
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert uvf.flag_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert len(uvf.lst_array) == len(uvf.time_array)
def test_to_waterfall_ant():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
assert len(uvf.lst_array) == len(uvf.time_array)
def test_to_waterfall_waterfall():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.to_waterfall()
with uvtest.check_warnings(UserWarning, "This object is already a waterfall"):
uvf.to_waterfall()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_flags(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.to_baseline(uv)
assert uvf.type == "baseline"
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
times = np.unique(uvf.time_array)
ntrue = 0.0
ind = np.where(uvf.time_array == times[0])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 10, 0])
ind = np.where(uvf.time_array == times[1])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 15, 0])
assert uvf.flag_array.mean() == ntrue / uvf.flag_array.size
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_to_baseline_metric(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.to_baseline(uv)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
times = np.unique(uvf.time_array)
ind = np.where(uvf.time_array == times[0])[0]
nt0 = len(ind)
assert np.all(uvf.metric_array[ind, 0, 10, 0] == 3.2)
ind = np.where(uvf.time_array == times[1])[0]
nt1 = len(ind)
assert np.all(uvf.metric_array[ind, 0, 15, 0] == 2.1)
assert np.isclose(
uvf.metric_array.mean(), (3.2 * nt0 + 2.1 * nt1) / uvf.metric_array.size
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_add_version_str(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_baseline(uv)
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_baseline_to_baseline(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf2 = uvf.copy()
uvf.to_baseline(uv)
assert uvf == uvf2
def test_to_baseline_metric_error(uvdata_obj, uvf_from_uvcal):
uvf = uvf_from_uvcal
uvf.select(polarizations=uvf.polarization_array[0])
uv = uvdata_obj
with pytest.raises(NotImplementedError) as cm:
uvf.to_baseline(uv, force_pol=True)
assert str(cm.value).startswith(
"Cannot currently convert from " "antenna type, metric mode"
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_from_antenna(uvdata_obj, uvf_from_uvcal):
uvf = uvf_from_uvcal
uvf.select(polarizations=uvf.polarization_array[0])
uvf.to_flag()
uv = uvdata_obj
ants_data = np.unique(uv.ant_1_array.tolist() + uv.ant_2_array.tolist())
new_ants = np.setdiff1d(ants_data, uvf.ant_array)
old_baseline = (uvf.ant_array[0], uvf.ant_array[1])
old_times = np.unique(uvf.time_array)
or_flags = np.logical_or(uvf.flag_array[0], uvf.flag_array[1])
or_flags = np.transpose(or_flags, [2, 0, 1, 3])
uv2 = copy.deepcopy(uv)
uvf2 = uvf.copy()
# hack in the exact times so we can compare some values later
uv2.select(bls=old_baseline)
uv2.time_array[: uvf2.time_array.size] = uvf.time_array
uvf.to_baseline(uv, force_pol=True)
uvf2.to_baseline(uv2, force_pol=True)
assert uvf.check()
uvf2.select(bls=old_baseline, times=old_times)
assert np.allclose(or_flags, uvf2.flag_array)
# all new antenna should be completely flagged
# checks auto correlations
uvf_new = uvf.select(antenna_nums=new_ants, inplace=False)
for bl in np.unique(uvf_new.baseline_array):
uvf2 = uvf_new.select(bls=uv.baseline_to_antnums(bl), inplace=False)
assert np.all(uvf2.flag_array)
# check for baselines with one new antenna
bls = [
uvf.baseline_to_antnums(bl)
for bl in uvf.baseline_array
if np.intersect1d(new_ants, uvf.baseline_to_antnums(bl)).size > 0
]
uvf_new = uvf.select(bls=bls, inplace=False)
for bl in np.unique(uvf_new.baseline_array):
uvf2 = uvf_new.select(bls=uv.baseline_to_antnums(bl), inplace=False)
assert np.all(uvf2.flag_array)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_errors(uvdata_obj):
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv = uvdata_obj
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.to_baseline(7.3) # invalid matching object
assert str(cm.value).startswith("Must pass in UVData object or UVFlag object")
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf2 = uvf.copy()
uvf.polarization_array[0] = -4
with pytest.raises(ValueError) as cm:
uvf.to_baseline(uv) # Mismatched pols
assert str(cm.value).startswith("Polarizations do not match.")
uvf.__iadd__(uvf2, axis="polarization")
with pytest.raises(ValueError) as cm:
uvf.to_baseline(uv) # Mismatched pols, can't be forced
assert str(cm.value).startswith("Polarizations could not be made to match.")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_force_pol(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.polarization_array[0] = -4 # Change pol, but force pol anyway
uvf.to_baseline(uv, force_pol=True)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
assert np.array_equal(uvf.polarization_array, uv.polarization_array)
times = np.unique(uvf.time_array)
ntrue = 0.0
ind = np.where(uvf.time_array == times[0])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 10, 0])
ind = np.where(uvf.time_array == times[1])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 15, 0])
assert uvf.flag_array.mean() == ntrue / uvf.flag_array.size
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_force_pol_npol_gt_1(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uv2 = copy.deepcopy(uv)
uv2.polarization_array[0] = -6
uv += uv2
uvf.to_baseline(uv, force_pol=True)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
assert np.array_equal(uvf.polarization_array, uv.polarization_array)
assert uvf.Npols == len(uvf.polarization_array)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_metric_force_pol(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.polarization_array[0] = -4
uvf.to_baseline(uv, force_pol=True)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
assert np.array_equal(uvf.polarization_array, uv.polarization_array)
times = np.unique(uvf.time_array)
ind = np.where(uvf.time_array == times[0])[0]
nt0 = len(ind)
assert np.all(uvf.metric_array[ind, 0, 10, 0] == 3.2)
ind = np.where(uvf.time_array == times[1])[0]
nt1 = len(ind)
assert np.all(uvf.metric_array[ind, 0, 15, 0] == 2.1)
assert np.isclose(
uvf.metric_array.mean(), (3.2 * nt0 + 2.1 * nt1) / uvf.metric_array.size
)
def test_to_antenna_flags():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.to_antenna(uvc)
assert uvf.type == "antenna"
assert np.all(uvf.ant_array == uvc.ant_array)
assert np.all(uvf.time_array == uvc.time_array)
assert np.all(uvf.flag_array[:, 0, 10, 0, 0])
assert np.all(uvf.flag_array[:, 0, 15, 1, 0])
assert uvf.flag_array.mean() == 2.0 * uvc.Nants_data / uvf.flag_array.size
def test_to_antenna_add_version_str():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_antenna(uvc)
assert pyuvdata_version_str in uvf.history
def test_to_antenna_metric():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.to_antenna(uvc)
assert np.all(uvf.ant_array == uvc.ant_array)
assert np.all(uvf.time_array == uvc.time_array)
assert np.all(uvf.metric_array[:, 0, 10, 0, 0] == 3.2)
assert np.all(uvf.metric_array[:, 0, 15, 1, 0] == 2.1)
assert np.isclose(
uvf.metric_array.mean(), (3.2 + 2.1) * uvc.Nants_data / uvf.metric_array.size
)
def test_to_antenna_flags_match_uvflag():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf2 = uvf.copy()
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.to_antenna(uvf2)
assert np.all(uvf.ant_array == uvc.ant_array)
assert np.all(uvf.time_array == uvc.time_array)
assert np.all(uvf.flag_array[:, 0, 10, 0, 0])
assert np.all(uvf.flag_array[:, 0, 15, 1, 0])
assert uvf.flag_array.mean() == 2.0 * uvc.Nants_data / uvf.flag_array.size
def test_antenna_to_antenna():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf2 = uvf.copy()
uvf.to_antenna(uvc)
assert uvf == uvf2
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_antenna_errors(uvdata_obj):
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv = uvdata_obj
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.to_antenna(7.3) # invalid matching object
assert str(cm.value).startswith("Must pass in UVCal object or UVFlag object ")
uvf = UVFlag(uv)
with pytest.raises(ValueError) as cm:
uvf.to_antenna(uvc) # Cannot pass in baseline type
assert str(cm.value).startswith('Cannot convert from type "baseline" to "antenna".')
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf2 = uvf.copy()
uvf.polarization_array[0] = -4
with pytest.raises(ValueError) as cm:
uvf.to_antenna(uvc) # Mismatched pols
assert str(cm.value).startswith("Polarizations do not match. ")
uvf.__iadd__(uvf2, axis="polarization")
with pytest.raises(ValueError) as cm:
uvf.to_antenna(uvc) # Mismatched pols, can't be forced
assert str(cm.value).startswith("Polarizations could not be made to match.")
def test_to_antenna_force_pol():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvc.select(jones=-5)
uvf = UVFlag(uvc)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.polarization_array[0] = -4 # Change pol, but force pol anyway
uvf.to_antenna(uvc, force_pol=True)
assert np.all(uvf.ant_array == uvc.ant_array)
assert np.all(uvf.time_array == uvc.time_array)
assert np.array_equal(uvf.polarization_array, uvc.jones_array)
assert np.all(uvf.flag_array[:, 0, 10, 0, 0])
assert np.all(uvf.flag_array[:, 0, 15, 1, 0])
assert uvf.flag_array.mean() == 2 * uvc.Nants_data / uvf.flag_array.size
def test_to_antenna_metric_force_pol():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvc.select(jones=-5)
uvf = UVFlag(uvc)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.polarization_array[0] = -4
uvf.to_antenna(uvc, force_pol=True)
assert np.all(uvf.ant_array == uvc.ant_array)
assert np.all(uvf.time_array == uvc.time_array)
assert np.array_equal(uvf.polarization_array, uvc.jones_array)
assert np.all(uvf.metric_array[:, 0, 10, 0, 0] == 3.2)
assert np.all(uvf.metric_array[:, 0, 15, 1, 0] == 2.1)
assert np.isclose(
uvf.metric_array.mean(), (3.2 + 2.1) * uvc.Nants_data / uvf.metric_array.size
)
def test_copy():
uvf = UVFlag(test_f_file)
uvf2 = uvf.copy()
assert uvf == uvf2
# Make sure it's a copy and not just pointing to same object
uvf.to_waterfall()
assert uvf != uvf2
def test_or():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.flag_array = np.ones_like(uvf2.flag_array)
uvf.flag_array[0] = True
uvf2.flag_array[0] = False
uvf2.flag_array[1] = False
uvf3 = uvf | uvf2
assert np.all(uvf3.flag_array[0])
assert not np.any(uvf3.flag_array[1])
assert np.all(uvf3.flag_array[2:])
def test_or_add_version_str():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf2 = uvf.copy()
uvf2.flag_array = np.ones_like(uvf2.flag_array)
uvf.flag_array[0] = True
uvf2.flag_array[0] = False
uvf2.flag_array[1] = False
uvf3 = uvf | uvf2
assert pyuvdata_version_str in uvf3.history
def test_or_error():
uvf = UVFlag(test_f_file)
uvf2 = uvf.copy()
uvf.to_flag()
with pytest.raises(ValueError) as cm:
uvf.__or__(uvf2)
assert str(cm.value).startswith('UVFlag object must be in "flag" mode')
def test_or_add_history():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.history = "Different history"
uvf3 = uvf | uvf2
assert uvf.history in uvf3.history
assert uvf2.history in uvf3.history
assert "Flags OR'd with:" in uvf3.history
def test_ior():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.flag_array = np.ones_like(uvf2.flag_array)
uvf.flag_array[0] = True
uvf2.flag_array[0] = False
uvf2.flag_array[1] = False
uvf |= uvf2
assert np.all(uvf.flag_array[0])
assert not np.any(uvf.flag_array[1])
assert np.all(uvf.flag_array[2:])
def test_to_flag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
assert 'Converted to mode "flag"' in uvf.history
def test_to_flag_add_version_str():
uvf = UVFlag(test_f_file)
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_flag()
assert pyuvdata_version_str in uvf.history
def test_to_flag_threshold():
uvf = UVFlag(test_f_file)
uvf.metric_array = np.zeros_like(uvf.metric_array)
uvf.metric_array[0, 0, 4, 0] = 2.0
uvf.to_flag(threshold=1.0)
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
assert uvf.flag_array[0, 0, 4, 0]
assert np.sum(uvf.flag_array) == 1.0
assert 'Converted to mode "flag"' in uvf.history
def test_flag_to_flag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.to_flag()
assert uvf == uvf2
def test_to_flag_unknown_mode():
uvf = UVFlag(test_f_file)
uvf.mode = "foo"
with pytest.raises(ValueError) as cm:
uvf.to_flag()
assert str(cm.value).startswith("Unknown UVFlag mode: foo")
def test_to_metric_baseline():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.flag_array[:, :, 10] = True
uvf.flag_array[1, :, :] = True
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
uvf.to_metric(convert_wgts=True)
assert hasattr(uvf, "metric_array")
assert hasattr(uvf, "flag_array")
assert uvf.flag_array is None
assert uvf.mode == "metric"
assert 'Converted to mode "metric"' in uvf.history
assert np.isclose(uvf.weights_array[1], 0.0).all()
assert np.isclose(uvf.weights_array[:, :, 10], 0.0).all()
def test_to_metric_add_version_str():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.flag_array[:, :, 10] = True
uvf.flag_array[1, :, :] = True
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert uvf.mode == "flag"
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_metric(convert_wgts=True)
assert pyuvdata_version_str in uvf.history
def test_to_metric_waterfall():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[:, 10] = True
uvf.flag_array[1, :, :] = True
uvf.to_metric(convert_wgts=True)
assert np.isclose(uvf.weights_array[1], 0.0).all()
assert np.isclose(uvf.weights_array[:, 10], 0.0).all()
def test_to_metric_antenna():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc, mode="flag")
uvf.flag_array[10, :, :, 1, :] = True
uvf.flag_array[15, :, 3, :, :] = True
uvf.to_metric(convert_wgts=True)
assert np.isclose(uvf.weights_array[10, :, :, 1, :], 0.0).all()
assert np.isclose(uvf.weights_array[15, :, 3, :, :], 0.0).all()
def test_metric_to_metric():
uvf = UVFlag(test_f_file)
uvf2 = uvf.copy()
uvf.to_metric()
assert uvf == uvf2
def test_to_metric_unknown_mode():
uvf = UVFlag(test_f_file)
uvf.mode = "foo"
with pytest.raises(ValueError) as cm:
uvf.to_metric()
assert str(cm.value).startswith("Unknown UVFlag mode: foo")
def test_antpair2ind():
uvf = UVFlag(test_f_file)
ind = uvf.antpair2ind(uvf.ant_1_array[0], uvf.ant_2_array[0])
assert np.all(uvf.ant_1_array[ind] == uvf.ant_1_array[0])
assert np.all(uvf.ant_2_array[ind] == uvf.ant_2_array[0])
def test_antpair2ind_nonbaseline():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.antpair2ind(0, 3)
assert str(cm.value).startswith(
"UVFlag object of type "
+ uvf.type
+ " does not contain antenna "
+ "pairs to index."
)
def test_baseline_to_antnums():
uvf = UVFlag(test_f_file)
a1, a2 = uvf.baseline_to_antnums(uvf.baseline_array[0])
assert a1 == uvf.ant_1_array[0]
assert a2 == uvf.ant_2_array[0]
def test_get_baseline_nums():
uvf = UVFlag(test_f_file)
bls = uvf.get_baseline_nums()
assert np.array_equal(bls, np.unique(uvf.baseline_array))
def test_get_antpairs():
uvf = UVFlag(test_f_file)
antpairs = uvf.get_antpairs()
for a1, a2 in antpairs:
ind = np.where((uvf.ant_1_array == a1) & (uvf.ant_2_array == a2))[0]
assert len(ind) > 0
for a1, a2 in zip(uvf.ant_1_array, uvf.ant_2_array):
assert (a1, a2) in antpairs
def test_missing_nants_telescope(tmp_path):
testfile = str(tmp_path / "test_missing_Nants.h5")
shutil.copyfile(test_f_file, testfile)
with h5py.File(testfile, "r+") as f:
del f["/Header/Nants_telescope"]
with uvtest.check_warnings(
UserWarning, match="Nants_telescope not available in file",
):
uvf = UVFlag(testfile)
uvf2 = UVFlag(test_f_file)
uvf2.Nants_telescope = 2047
assert uvf == uvf2
os.remove(testfile)
def test_combine_metrics_inplace():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.metric_array *= 2
uvf3 = uvf.copy()
uvf3.metric_array *= 3
uvf.combine_metrics([uvf2, uvf3])
factor = np.sqrt((1 + 4 + 9) / 3.0) / 2.0
assert np.allclose(uvf.metric_array, np.abs(uvf2.metric_array) * factor)
def test_combine_metrics_not_inplace():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.metric_array *= 2
uvf3 = uvf.copy()
uvf3.metric_array *= 3
uvf4 = uvf.combine_metrics([uvf2, uvf3], inplace=False)
factor = np.sqrt((1 + 4 + 9) / 3.0)
assert np.allclose(uvf4.metric_array, np.abs(uvf.metric_array) * factor)
def test_combine_metrics_not_uvflag():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
with pytest.raises(ValueError) as cm:
uvf.combine_metrics("bubblegum")
assert str(cm.value).startswith('"others" must be UVFlag or list of UVFlag objects')
def test_combine_metrics_not_metric():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.to_flag()
with pytest.raises(ValueError) as cm:
uvf.combine_metrics(uvf2)
assert str(cm.value).startswith('UVFlag object and "others" must be in "metric"')
def test_combine_metrics_wrong_shape():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.combine_metrics(uvf2)
assert str(cm.value).startswith("UVFlag metric array shapes do not match.")
def test_combine_metrics_add_version_str():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
np.random.seed(44)
uvf.metric_array = np.random.normal(size=uvf.metric_array.shape)
uvf2 = uvf.copy()
uvf2.metric_array *= 2
uvf3 = uvf.copy()
uvf3.metric_array *= 3
uvf4 = uvf.combine_metrics([uvf2, uvf3], inplace=False)
assert pyuvdata_version_str in uvf4.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_super(uvdata_obj):
class TestClass(UVFlag):
def __init__(
self,
indata,
mode="metric",
copy_flags=False,
waterfall=False,
history="",
label="",
test_property="prop",
):
super(TestClass, self).__init__(
indata,
mode=mode,
copy_flags=copy_flags,
waterfall=waterfall,
history=history,
label=label,
)
self.test_property = test_property
uv = uvdata_obj
tc = TestClass(uv, test_property="test_property")
# UVFlag.__init__ is tested, so just see if it has a metric array
assert hasattr(tc, "metric_array")
# Check that it has the property
assert tc.test_property == "test_property"
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_flags2waterfall(uvdata_obj):
uv = uvdata_obj
np.random.seed(0)
uv.flag_array = np.random.randint(0, 2, size=uv.flag_array.shape, dtype=bool)
wf = flags2waterfall(uv)
assert np.allclose(np.mean(wf), np.mean(uv.flag_array))
assert wf.shape == (uv.Ntimes, uv.Nfreqs)
wf = flags2waterfall(uv, keep_pol=True)
assert wf.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
# Test external flag_array
uv.flag_array = np.zeros_like(uv.flag_array)
f = np.random.randint(0, 2, size=uv.flag_array.shape, dtype=bool)
wf = flags2waterfall(uv, flag_array=f)
assert np.allclose(np.mean(wf), np.mean(f))
assert wf.shape == (uv.Ntimes, uv.Nfreqs)
# UVCal version
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvc.flag_array = np.random.randint(0, 2, size=uvc.flag_array.shape, dtype=bool)
wf = flags2waterfall(uvc)
assert np.allclose(np.mean(wf), np.mean(uvc.flag_array))
assert wf.shape == (uvc.Ntimes, uvc.Nfreqs)
wf = flags2waterfall(uvc, keep_pol=True)
assert wf.shape == (uvc.Ntimes, uvc.Nfreqs, uvc.Njones)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_flags2waterfall_errors(uvdata_obj):
# First argument must be UVData or UVCal object
with pytest.raises(ValueError) as cm:
flags2waterfall(5)
assert str(cm.value).startswith(
"flags2waterfall() requires a UVData or " + "UVCal object"
)
uv = uvdata_obj
# Flag array must have same shape as uv.flag_array
with pytest.raises(ValueError) as cm:
flags2waterfall(uv, np.array([4, 5]))
assert str(cm.value).startswith("Flag array must align with UVData or UVCal")
def test_and_rows_cols():
d = np.zeros((10, 20), np.bool_)
d[1, :] = True
d[:, 2] = True
d[5, 10:20] = True
d[5:8, 5] = True
o = and_rows_cols(d)
assert o[1, :].all()
assert o[:, 2].all()
assert not o[5, :].all()
assert not o[:, 5].all()
def test_select_waterfall_errors(uvf_from_waterfall):
uvf = uvf_from_waterfall
with pytest.raises(ValueError) as cm:
uvf.select(antenna_nums=[0, 1, 2])
assert str(cm.value).startswith("Cannot select on antenna_nums with waterfall")
with pytest.raises(ValueError) as cm:
uvf.select(bls=[(0, 1), (0, 2)])
assert str(cm.value).startswith("Cannot select on bls with waterfall")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize("dimension", list(range(1, 4)))
def test_select_blt_inds(input_uvf, uvf_mode, dimension):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
if uvf.type == "baseline":
n_select = uvf.Nblts
else:
n_select = uvf.Ntimes
blt_inds = np.random.choice(n_select, size=n_select // 2, replace=False)
new_nblts = n_select // 2
if dimension == 1:
blt_inds = np.atleast_1d(blt_inds)
elif dimension == 2:
blt_inds = np.atleast_2d(blt_inds)
elif dimension == 3:
blt_inds = np.atleast_3d(blt_inds)
uvf1 = uvf.select(blt_inds=blt_inds, inplace=False)
# test the data was extracted correctly for each case
for param_name, new_param in zip(uvf._data_params, uvf1.data_like_parameters):
old_param = getattr(uvf, param_name)
if uvf.type == "baseline":
assert np.allclose(old_param[blt_inds.squeeze()], new_param)
if uvf.type == "antenna":
assert np.allclose(old_param[:, :, :, blt_inds.squeeze()], new_param)
if uvf.type == "waterfall":
assert np.allclose(old_param[blt_inds.squeeze()], new_param)
if uvf.type == "baseline":
assert uvf1.Nblts == new_nblts
else:
assert uvf1.Ntimes == new_nblts
# verify that histories are different
assert not uvutils._check_histories(uvf.history, uvf1.history)
if uvf.type == "baseline":
addition_str = "baseline-times"
else:
addition_str = "times"
assert uvutils._check_histories(
uvf.history + f" Downselected to specific {addition_str} using pyuvdata.",
uvf1.history,
)
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize(
"select_kwargs,err_msg",
[
({"blt_inds": []}, "No baseline-times were found"),
({"blt_inds": [int(1e9)]}, "blt_inds contains indices that are too large"),
({"blt_inds": [-1]}, "blt_inds contains indices that are negative"),
],
)
def test_select_blt_inds_errors(input_uvf, uvf_mode, select_kwargs, err_msg):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
with pytest.raises(ValueError, match=err_msg):
uvf.select(**select_kwargs)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize("dimension", list(range(1, 4)))
def test_select_antenna_nums(input_uvf, uvf_mode, dimension):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
old_history = copy.deepcopy(uvf.history)
np.random.seed(0)
if uvf.type == "baseline":
unique_ants = np.unique(uvf.ant_1_array.tolist() + uvf.ant_2_array.tolist())
ants_to_keep = np.random.choice(
unique_ants, size=unique_ants.size // 2, replace=False
)
blts_select = [
(a1 in ants_to_keep) & (a2 in ants_to_keep)
for (a1, a2) in zip(uvf.ant_1_array, uvf.ant_2_array)
]
Nblts_selected = np.sum(blts_select)
else:
unique_ants = np.unique(uvf.ant_array)
ants_to_keep = np.random.choice(
unique_ants, size=unique_ants.size // 2, replace=False
)
if dimension == 1:
ants_to_keep = np.atleast_1d(ants_to_keep)
elif dimension == 2:
ants_to_keep = np.atleast_2d(ants_to_keep)
elif dimension == 3:
ants_to_keep = np.atleast_3d(ants_to_keep)
uvf2 = copy.deepcopy(uvf)
uvf2.select(antenna_nums=ants_to_keep)
# make 1-D for the remaining iterators in tests
ants_to_keep = ants_to_keep.squeeze()
assert ants_to_keep.size == uvf2.Nants_data
if uvf2.type == "baseline":
assert Nblts_selected == uvf2.Nblts
for ant in ants_to_keep:
assert ant in uvf2.ant_1_array or ant in uvf2.ant_2_array
for ant in np.unique(uvf2.ant_1_array.tolist() + uvf2.ant_2_array.tolist()):
assert ant in ants_to_keep
else:
for ant in ants_to_keep:
assert ant in uvf2.ant_array
for ant in np.unique(uvf2.ant_array):
assert ant in ants_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific antennas using pyuvdata.",
uvf2.history,
)
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_antenna_nums_error(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
# also test for error if antenna numbers not present in data
with pytest.raises(ValueError) as cm:
uvf.select(antenna_nums=[708, 709, 710])
assert str(cm.value).startswith("Antenna number 708 is not present")
def sort_bl(p):
"""Sort a tuple that starts with a pair of antennas, and may have stuff after."""
if p[1] >= p[0]:
return p
return (p[1], p[0]) + p[2:]
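# For example, sort_bl((3, 1, "xx")) returns (1, 3, "xx"), while an
# already-ordered tuple such as (1, 3, "xx") comes back unchanged.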
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_bls(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
if uvf.type != "baseline":
with pytest.raises(ValueError) as cm:
uvf.select(bls=[(0, 1)])
assert str(cm.value).startswith(
'Only "baseline" mode UVFlag '
"objects may select along the "
"baseline axis"
)
else:
old_history = copy.deepcopy(uvf.history)
bls_select = np.random.choice(
uvf.baseline_array, size=uvf.Nbls // 2, replace=False
)
first_ants, second_ants = uvf.baseline_to_antnums(bls_select)
# give the conjugate bls for a few baselines
first_ants[5:8], second_ants[5:8] = (
copy.copy(second_ants[5:8]),
copy.copy(first_ants[5:8]),
)
new_unique_ants = np.unique(first_ants.tolist() + second_ants.tolist())
ant_pairs_to_keep = list(zip(first_ants, second_ants))
sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
blts_select = [
sort_bl((a1, a2)) in sorted_pairs_to_keep
for (a1, a2) in zip(uvf.ant_1_array, uvf.ant_2_array)
]
Nblts_selected = np.sum(blts_select)
uvf2 = copy.deepcopy(uvf)
uvf2.select(bls=ant_pairs_to_keep)
sorted_pairs_object2 = [
sort_bl(p) for p in zip(uvf2.ant_1_array, uvf2.ant_2_array)
]
assert len(new_unique_ants) == uvf2.Nants_data
assert Nblts_selected == uvf2.Nblts
for ant in new_unique_ants:
assert ant in uvf2.ant_1_array or ant in uvf2.ant_2_array
for ant in np.unique(uvf2.ant_1_array.tolist() + uvf2.ant_2_array.tolist()):
assert ant in new_unique_ants
for pair in sorted_pairs_to_keep:
assert pair in sorted_pairs_object2
for pair in sorted_pairs_object2:
assert pair in sorted_pairs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific baselines using pyuvdata.",
uvf2.history,
)
# Check with polarization too
first_ants, second_ants = uvf.baseline_to_antnums(bls_select)
# conjugate a few bls
first_ants[5:8], second_ants[5:8] = (
copy.copy(second_ants[5:8]),
copy.copy(first_ants[5:8]),
)
pols = ["xx"] * len(first_ants)
new_unique_ants = np.unique(first_ants.tolist() + second_ants.tolist())
ant_pairs_to_keep = list(zip(first_ants, second_ants, pols))
sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
blts_select = [
sort_bl((a1, a2, "xx")) in sorted_pairs_to_keep
for (a1, a2) in zip(uvf.ant_1_array, uvf.ant_2_array)
]
Nblts_selected = np.sum(blts_select)
uvf2 = copy.deepcopy(uvf)
uvf2.select(bls=ant_pairs_to_keep)
sorted_pairs_object2 = [
sort_bl(p) + ("xx",) for p in zip(uvf2.ant_1_array, uvf2.ant_2_array)
]
assert len(new_unique_ants) == uvf2.Nants_data
assert Nblts_selected == uvf2.Nblts
for ant in new_unique_ants:
assert ant in uvf2.ant_1_array or ant in uvf2.ant_2_array
for ant in np.unique(uvf2.ant_1_array.tolist() + uvf2.ant_2_array.tolist()):
assert ant in new_unique_ants
for pair in sorted_pairs_to_keep:
assert pair in sorted_pairs_object2
for pair in sorted_pairs_object2:
assert pair in sorted_pairs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to "
"specific baselines, polarizations using pyuvdata.",
uvf2.history,
)
# check that you can specify a single pair without errors
assert isinstance(ant_pairs_to_keep[0], tuple)
uvf2.select(bls=ant_pairs_to_keep[0])
sorted_pairs_object2 = [
sort_bl(p) + ("xx",) for p in zip(uvf2.ant_1_array, uvf2.ant_2_array)
]
assert list(set(sorted_pairs_object2)) == [ant_pairs_to_keep[0]]
@cases_decorator_no_waterfall
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize(
"select_kwargs,err_msg",
[
({"bls": [3]}, "bls must be a list of tuples"),
({"bls": [(np.pi, 2 * np.pi)]}, "bls must be a list of tuples of integer"),
(
{"bls": (0, 1, "xx"), "polarizations": [-5]},
"Cannot provide length-3 tuples and also specify polarizations.",
),
(
{"bls": (0, 1, 5)},
"The third element in each bl must be a polarization string",
),
({"bls": (455, 456)}, "Antenna number 455 is not present"),
({"bls": (97, 456)}, "Antenna number 456 is not present"),
(
{"bls": (97, 97)},
r"Antenna pair \(97, 97\) does not have any data associated with it.",
),
],
)
def test_select_bls_errors(input_uvf, uvf_mode, select_kwargs, err_msg):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
if uvf.type != "baseline":
with pytest.raises(ValueError) as cm:
uvf.select(bls=[(0, 1)])
assert str(cm.value).startswith(
'Only "baseline" mode UVFlag '
"objects may select along the "
"baseline axis"
)
else:
if select_kwargs["bls"] == (97, 97):
uvf.select(bls=[(97, 104), (97, 105), (88, 97)])
with pytest.raises(ValueError, match=err_msg):
uvf.select(**select_kwargs)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_times(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
unique_times = np.unique(uvf.time_array)
times_to_keep = np.random.choice(
unique_times, size=unique_times.size // 2, replace=False
)
Nblts_selected = np.sum([t in times_to_keep for t in uvf.time_array])
uvf2 = copy.deepcopy(uvf)
uvf2.select(times=times_to_keep)
assert len(times_to_keep) == uvf2.Ntimes
if uvf2.type == "baseline":
n_compare = uvf2.Nblts
else:
n_compare = uvf2.Ntimes
assert Nblts_selected == n_compare
for t in times_to_keep:
assert t in uvf2.time_array
for t in np.unique(uvf2.time_array):
assert t in times_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific times using pyuvdata.",
uvf2.history,
)
# check that it also works with higher dimension array
uvf2 = copy.deepcopy(uvf)
uvf2.select(times=times_to_keep[np.newaxis, :])
assert len(times_to_keep) == uvf2.Ntimes
assert Nblts_selected == n_compare
for t in times_to_keep:
assert t in uvf2.time_array
for t in np.unique(uvf2.time_array):
assert t in times_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific times using pyuvdata.",
uvf2.history,
)
# check for errors associated with times not included in data
with pytest.raises(ValueError) as cm:
bad_time = [np.min(unique_times) - 0.005]
uvf.select(times=bad_time)
assert str(cm.value).startswith(
"Time {t} is not present in" " the time_array".format(t=bad_time[0])
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_frequencies(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
freqs_to_keep = np.random.choice(
uvf.freq_array.squeeze(), size=uvf.Nfreqs // 10, replace=False
)
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep)
assert len(freqs_to_keep) == uvf2.Nfreqs
for f in freqs_to_keep:
assert f in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
assert f in freqs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check that it also works with higher dimension array
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep[np.newaxis, :])
assert len(freqs_to_keep) == uvf2.Nfreqs
for f in freqs_to_keep:
assert f in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
assert f in freqs_to_keep
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check that selecting one frequency works
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep[0])
assert 1 == uvf2.Nfreqs
assert freqs_to_keep[0] in uvf2.freq_array
for f in uvf2.freq_array:
assert f in [freqs_to_keep[0]]
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check for errors associated with frequencies not included in data
with pytest.raises(ValueError) as cm:
bad_freq = [np.max(uvf.freq_array) + 100]
uvf.select(frequencies=bad_freq)
assert str(cm.value).startswith(
"Frequency {f} is not present in the freq_array".format(f=bad_freq[0])
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select_freq_chans(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
    old_history = uvf.history
chans = np.random.choice(uvf.Nfreqs, 2)
c1, c2 = np.sort(chans)
chans_to_keep = np.arange(c1, c2)
uvf2 = copy.deepcopy(uvf)
uvf2.select(freq_chans=chans_to_keep)
assert len(chans_to_keep) == uvf2.Nfreqs
for chan in chans_to_keep:
if uvf2.type != "waterfall":
assert uvf.freq_array[0, chan] in uvf2.freq_array
else:
assert uvf.freq_array[chan] in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
if uvf2.type != "waterfall":
assert f in uvf.freq_array[0, chans_to_keep]
else:
assert f in uvf.freq_array[chans_to_keep]
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# check that it also works with higher dimension array
uvf2 = copy.deepcopy(uvf)
uvf2.select(freq_chans=chans_to_keep[np.newaxis, :])
assert len(chans_to_keep) == uvf2.Nfreqs
for chan in chans_to_keep:
if uvf2.type != "waterfall":
assert uvf.freq_array[0, chan] in uvf2.freq_array
else:
assert uvf.freq_array[chan] in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
if uvf2.type != "waterfall":
assert f in uvf.freq_array[0, chans_to_keep]
else:
assert f in uvf.freq_array[chans_to_keep]
assert uvutils._check_histories(
old_history + " Downselected to " "specific frequencies using pyuvdata.",
uvf2.history,
)
# Test selecting both channels and frequencies
chans = np.random.choice(uvf.Nfreqs, 2)
c1, c2 = np.sort(chans)
chans_to_keep = np.arange(c1, c2)
if uvf.type != "waterfall":
freqs_to_keep = uvf.freq_array[0, np.arange(c1 + 1, c2)] # Overlaps with chans
else:
freqs_to_keep = uvf.freq_array[np.arange(c1 + 1, c2)] # Overlaps with chans
all_chans_to_keep = np.arange(c1, c2)
uvf2 = copy.deepcopy(uvf)
uvf2.select(frequencies=freqs_to_keep, freq_chans=chans_to_keep)
assert len(all_chans_to_keep) == uvf2.Nfreqs
for chan in chans_to_keep:
if uvf2.type != "waterfall":
assert uvf.freq_array[0, chan] in uvf2.freq_array
else:
assert uvf.freq_array[chan] in uvf2.freq_array
for f in np.unique(uvf2.freq_array):
if uvf2.type != "waterfall":
assert f in uvf.freq_array[0, chans_to_keep]
else:
assert f in uvf.freq_array[chans_to_keep]
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
@pytest.mark.parametrize("pols_to_keep", ([-5], ["xx"], ["nn"], [[-5]]))
def test_select_polarizations(uvf_mode, pols_to_keep, input_uvf):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
uvf.x_orientation = "north"
uvf2 = copy.deepcopy(uvf)
uvf2.select(polarizations=pols_to_keep)
if isinstance(pols_to_keep[0], list):
pols_to_keep = pols_to_keep[0]
assert len(pols_to_keep) == uvf2.Npols
for p in pols_to_keep:
if isinstance(p, int):
assert p in uvf2.polarization_array
else:
assert (
uvutils.polstr2num(p, x_orientation=uvf2.x_orientation)
in uvf2.polarization_array
)
for p in np.unique(uvf2.polarization_array):
if isinstance(pols_to_keep[0], int):
assert p in pols_to_keep
else:
assert p in uvutils.polstr2num(
pols_to_keep, x_orientation=uvf2.x_orientation
)
assert uvutils._check_histories(
old_history + " Downselected to " "specific polarizations using pyuvdata.",
uvf2.history,
)
# check for errors associated with polarizations not included in data
with pytest.raises(ValueError) as cm:
uvf2.select(polarizations=[-3])
assert str(cm.value).startswith(
"Polarization {p} is not present in the polarization_array".format(p=-3)
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@cases_decorator
@pytest.mark.parametrize("uvf_mode", ["to_flag", "to_metric"])
def test_select(input_uvf, uvf_mode):
uvf = input_uvf
# used to set the mode depending on which input is given to uvf_mode
getattr(uvf, uvf_mode)()
np.random.seed(0)
old_history = uvf.history
# make new blts
if uvf.type == "baseline":
blt_inds = np.arange(uvf.Nblts - 1)
else:
blt_inds = np.arange(uvf.Ntimes - 1)
# new freqs
freqs_to_keep = np.random.choice(
uvf.freq_array.squeeze(), size=uvf.Nfreqs - 1, replace=False
)
# new ants
if uvf.type == "baseline":
unique_ants = np.unique(uvf.ant_1_array.tolist() + uvf.ant_2_array.tolist())
ants_to_keep = np.random.choice(
unique_ants, size=unique_ants.size - 1, replace=False
)
elif uvf.type == "antenna":
        unique_ants = np.unique(uvf.ant_array)
import numpy as np
import scipy.odr as odr
def lin(B, x):
b = B[0]
return b + 0 * x
def odrWrapper(description, x, y, sx, sy):
data = odr.RealData(x, y, sx, sy)
regression = odr.ODR(data, odr.Model(lin), beta0=[1])
regression = regression.run()
popt = regression.beta
cov_beta = np.sqrt(np.diag(regression.cov_beta))
sd_beta = regression.sd_beta
print(description, popt, sd_beta, cov_beta)
# constants
b = 50
n = 10000
noiseScale = 10
uncert = 1
np.random.seed(0)
# no noise no uncertainty
x = np.linspace(0, 100, n)
y = np.ones(n)
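# The original script breaks off here; one plausible continuation (an
# assumption, not part of the source) fits the constant model to this
# noise-free case:
# odrWrapper('no noise no uncertainty', x, y * b,
#            uncert * np.ones(n), uncert * np.ones(n))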
#!/usr/bin/env python3
"""
Investigate DSC data.
Created on Fri Sep 13 12:44:01 2019
@author: slevy
"""
import dsc_extract_physio
import nibabel as nib
import numpy as np
import os
import matplotlib.pyplot as plt
import scipy.signal
import scipy.stats
import pydicom
from matplotlib import cm
from lmfit.models import GaussianModel
from datetime import datetime
import warnings
def extract_signal_within_roi(image, mask):
if len(image.shape) > 3:
nrep = image.shape[3]
s_along_reps = np.zeros((nrep))
s_along_reps_by_slice = np.zeros((nrep, image.shape[2]))
for i_rep in range(nrep):
img_rep_i = image[:, :, :, i_rep]
s_along_reps[i_rep] = np.mean(img_rep_i[mask > 0])
for i_z in range(image.shape[2]):
s_along_reps_by_slice[i_rep, i_z] = np.mean(img_rep_i[mask[:, :, i_z] > 0, i_z])
return s_along_reps, s_along_reps_by_slice
else:
s_whole_mask = np.mean(image[mask > 0])
s_by_slice = np.zeros((image.shape[2]))
for i_z in range(image.shape[2]):
s_by_slice[i_z] = np.mean(image[mask[:, :, i_z] > 0, i_z])
return s_whole_mask, s_by_slice
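# Minimal usage sketch for extract_signal_within_roi() (illustrative only;
# the synthetic shapes below are assumptions, not the study data):
# img = np.random.rand(4, 4, 3, 10)                  # x, y, slice, repetition
# mask = np.zeros((4, 4, 3)); mask[1:3, 1:3, :] = 1  # small central ROI
# s_reps, s_reps_by_slice = extract_signal_within_roi(img, mask)
# # s_reps has shape (10,); s_reps_by_slice has shape (10, 3)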
# def detect_outliers(signal, time):
#
# # thresholds for detection
# sd_t = np.std(signal[1:]) # first point is always outlier
# mean_baseline = np.mean(signal[0, 1:12])
#
#
# # find outliers =================================================================================
# signal_reptimes = np.vstack((s_along_reps, reps_acqtime))
# signal_reptimes_outliers = np.zeros((2, 1))
# signal_reptimes_outliers[:, 0] = signal_reptimes[:, 0] # save the first point as outlier because it is always corrupted in those data
# signal_reptimes_without_outliers = signal_reptimes[:, 1:] # remove the first point which is always corrupted with this sequence
#
# # if above 3 standard-deviation it is an outlier
# idx_outliers = np.where(np.abs(signal_reptimes_without_outliers[0, :] - mean_baseline) >= 3*sd_t) # find indexes of outliers
# signal_reptimes_outliers = np.hstack((signal_reptimes_outliers, signal_reptimes_without_outliers[:, idx_outliers[0]])) # save the detected outliers
# signal_reptimes_without_outliers = np.delete(signal_reptimes_without_outliers, idx_outliers, axis=1) # remove the outliers
# # by slice
# s_along_reps_by_slice = np.delete(s_along_reps_by_slice, 0, axis=0) # first point is always outlier
# sd_t_by_slice = np.std(s_along_reps_by_slice, axis=0) # temporal SD for each slice
# s_along_reps_by_slice_without_outliers = [] # [[signal, acqtimes], [,], [,] ]
# for i_z in range(dsc.shape[2]):
# idx_outliers_z_i = np.where(np.abs(s_along_reps_by_slice[:, i_z] - np.mean(s_along_reps_by_slice[0:11, i_z])) >= 3 * sd_t_by_slice[i_z]) # find indexes of outliers
# s_along_reps_by_slice_without_outliers.append([np.delete(s_along_reps_by_slice[:, i_z], idx_outliers_z_i), np.delete(signal_reptimes[1, 1:], idx_outliers_z_i)])
#
# return idx_outliers, signal_without_outliers, signal_outliers, time_without_outliers_time_outliers
def smooth_signal(signal, baseline_nb=10, windowLength=23, outPlotFname=''):
"""
Smooth signal.
:param signal: MRI signal, already regridded to a regular sampling
:param time:
:param baseline_nb:
:param increase_res_factor:
:return:
"""
# first point is always an outlier (and a NaN actually because of the TReff normalization)
# --> replace it by the mean signal at baseline
signal[0] = np.mean(signal[1:baseline_nb])
# # interpolate signal on regular grid
# t_regular_sampling = np.linspace(np.min(time), np.max(time), increase_res_factor * len(time))
# signal_interp = np.interp(t_regular_sampling, time, signal)
# replace
# signal_interp_smoothed = scipy.signal.savgol_filter(signal_interp, window_length=25, polyorder=3)
signal_smoothed = scipy.signal.savgol_filter(signal, window_length=windowLength, polyorder=5, mode='constant', cval=signal[0])
if outPlotFname:
# plot results
fig, ((ax1)) = plt.subplots(1, 1, figsize=(20, 9.5))
ax1.set_title('Final signal smoothing')
ax1.set_xlabel('Points')
ax1.plot(np.arange(signal.size), signal, label='original signal', color='black', lw=0.3, marker='+')
ax1.plot(np.arange(signal.size), signal_smoothed, label='smoothed signal', color='tab:blue', lw=0.3, marker='o', fillstyle='none')
ax1.legend()
ax1.grid()
fig.savefig(outPlotFname)
plt.close()
return signal_smoothed
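# Minimal usage sketch for smooth_signal() (illustrative; the synthetic signal
# is an assumption). The input must be longer than the default window of 23:
# t = np.linspace(0, 1, 200)
# noisy = np.sin(2 * np.pi * t) + 0.1 * np.random.randn(200)
# smoothed = smooth_signal(noisy, baseline_nb=10, windowLength=23)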
def smoothlyCropSignal(mriSignalRegrid, firstPassStartRepRegrid, firstPassEndRepRegrid, injRepRegrid, outPlotFname=''):
"""
:param mriSignalRegrid:
:param baselineLastRepRegrid:
:param firstPassEndRepRegrid:
:param outPlotFname:
:return: mriSignalCropSmooth: signal cropped before first pass start and after first pass end with smooth transitions
mriSignalCropEndSmooth_forAIF: signal cropped only after half time of first pass (start time + (end time -
start time)/2) with smooth transition, to be used for AIF detection
"""
# calculate the baseline before and after contrast agent first pass
baselineBefore = np.mean(mriSignalRegrid[0:firstPassStartRepRegrid])
    baselineAfter = np.mean(mriSignalRegrid[firstPassEndRepRegrid:-1])
"""
tellurium 1.3.5
auto-generated code
sedmlDoc: L1V2
workingDir: /home/mkoenig/git/tellurium/examples/tellurium-files/phrasedml/results/_te_case_12
inputType: COMBINE_FILE
"""
from __future__ import print_function, division
import tellurium as te
from roadrunner import Config
from tellurium.sedml.mathml import *
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
import libsedml
import pandas
import os.path
Config.LOADSBMLOPTIONS_RECOMPILE = True
workingDir = r'/home/mkoenig/git/tellurium/examples/tellurium-files/phrasedml/results/_te_case_12'
# --------------------------------------------------------
# Models
# --------------------------------------------------------
# Model <mod1>
mod1 = te.loadSBMLModel(os.path.join(workingDir, 'case_12.xml'))
# --------------------------------------------------------
# Tasks
# --------------------------------------------------------
# Task <task1>
# not part of any DataGenerator: task1
# Task <task2>
# not part of any DataGenerator: task2
# Task <repeat1>
repeat1 = []
__range__uniform_linear_for_S1 = np.linspace(start=0.0, stop=10.0, num=5)
for __k__uniform_linear_for_S1, __value__uniform_linear_for_S1 in enumerate(__range__uniform_linear_for_S1):
mod1.reset()
# Task: <task1>
task1 = [None]
mod1.setIntegrator('cvode')
mod1['init([S1])'] = __value__uniform_linear_for_S1
__value__S1 = mod1['init([S1])']
mod1['init([S2])'] = __value__S1 + 20
mod1.timeCourseSelections = ['[S1]', '[S2]', 'time']
mod1.simulate(start=0.0, end=2.0, points=2)
task1[0] = mod1.simulate(start=2.0, end=10.0, steps=49)
repeat1.extend(task1)
# Task <repeat2>
repeat2 = []
__range__uniform_linear_for_S1 = np.linspace(start=0.0, stop=10.0, num=5)
for __k__uniform_linear_for_S1, __value__uniform_linear_for_S1 in enumerate(__range__uniform_linear_for_S1):
mod1.reset()
# Task: <task2>
task2 = [None]
mod1.setIntegrator('cvode')
mod1['init([S1])'] = __value__uniform_linear_for_S1
__value__S1 = mod1['init([S1])']
mod1['init([S2])'] = __value__S1 + 20
mod1.timeCourseSelections = ['[S1]', '[S2]', 'time']
task2[0] = mod1.simulate(start=0.0, end=15.0, steps=49)
repeat2.extend(task2)
# --------------------------------------------------------
# DataGenerators
# --------------------------------------------------------
# DataGenerator <plot_0_0_0>
__var__repeat2_____time = np.transpose(np.array([sim['time'] for sim in repeat2]))
if len(__var__repeat2_____time.shape) == 1:
__var__repeat2_____time.shape += (1,)
plot_0_0_0 = __var__repeat2_____time
# DataGenerator <plot_0_0_1>
__var__repeat2_____S1 = np.transpose(np.array([sim['[S1]'] for sim in repeat2]))
if len(__var__repeat2_____S1.shape) == 1:
    __var__repeat2_____S1.shape += (1,)
plot_0_0_1 = __var__repeat2_____S1
#coding=utf-8
import tensorflow as tf
import numpy as np
import matplotlib.image as mpimg
import xml.etree.ElementTree as ET
from xml.dom.minidom import Document
import random
import os
import object_detection2.npod_toolkit as npod
import math
from semantic.visualization_utils import draw_bounding_boxes_on_image_tensors
import object_detection2.visualization as odv
import wml_utils
import logging
import shutil
from thirdparty.odmetrics import coco_evaluation
from thirdparty.odmetrics import standard_fields
import wml_utils as wmlu
import img_utils as wmli
import copy
from collections import OrderedDict
def __safe_percent(v0, v1):
if v1==0:
return 100.
else:
return v0*100./v1
def getF1(gtboxes,gtlabels,boxes,labels,threshold=0.5):
gt_shape = gtboxes.shape
    # indicates whether some predicted box matched this ground-truth box
gt_mask = np.zeros([gt_shape[0]],dtype=np.int32)
boxes_shape = boxes.shape
    # indicates whether some ground-truth box matched this predicted box
boxes_mask = np.zeros(boxes_shape[0],dtype=np.int32)
gt_size = gtlabels.shape[0]
boxes_size = labels.shape[0]
for i in range(gt_size):
max_index = -1
max_jaccard = 0.0
        # iterate over all boxes to find the one with the maximum jaccard overlap with the current ground-truth box
for j in range(boxes_size):
if gtlabels[i] != labels[j] or boxes_mask[j] != 0:
continue
jaccard = npod.box_jaccard(gtboxes[i],boxes[j])
if jaccard>threshold and jaccard > max_jaccard:
max_jaccard = jaccard
max_index = j
if max_index < 0:
continue
gt_mask[i] = 1
boxes_mask[max_index] = 1
correct_num = np.sum(gt_mask)
    # F1 = 2*TP / (num_pred + num_gt)
    f1 = __safe_percent(2*correct_num, boxes_size + gt_shape[0])
return f1
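# Illustrative check for getF1() with toy boxes (values are assumptions):
# two ground-truth boxes, one matched prediction, so TP=1 and
# F1 = 2*1/(1+2)*100 ~= 66.7.
# gt = np.array([[0., 0., .5, .5], [.5, .5, 1., 1.]])
# pred = np.array([[0., 0., .5, .5]])
# getF1(gt, np.array([1, 2]), pred, np.array([1]))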
'''
gtboxes:[X,4](ymin,xmin,ymax,xmax) relative coordinates, ground-truth boxes
gtlabels:[X] the labels for the ground-truth boxes
boxes:[Y,4](ymin,xmin,ymax,xmax) relative coordinates, predicted boxes
labels:[Y] the labels for the predicted boxes
probability:[Y] the probability for each box; if None, the boxes are assumed to be sorted by ascending probability
return:
mAP:[0,100]
'''
def getmAP(gtboxes,gtlabels,boxes,labels,probability=None,threshold=0.5):
if not isinstance(gtboxes,np.ndarray):
gtboxes = np.array(gtboxes)
if not isinstance(gtlabels,np.ndarray):
gtlabels = np.array(gtlabels)
if not isinstance(boxes,np.ndarray):
boxes = np.array(boxes)
if not isinstance(labels,np.ndarray):
labels = np.array(labels)
gtboxes = copy.deepcopy(np.array(gtboxes))
gtlabels = copy.deepcopy(np.array(gtlabels))
boxes = copy.deepcopy(boxes)
labels = copy.deepcopy(labels)
if probability is not None:
probability = copy.deepcopy(probability)
index = np.argsort(probability)
boxes = boxes[index]
labels = labels[index]
max_nr = 20
data_nr = boxes.shape[0]
if data_nr==0:
return 0.0
if data_nr>max_nr:
beg_index = range(0,data_nr,data_nr//max_nr)
else:
beg_index = range(0,data_nr)
res = []
for v in beg_index:
p,r = getPrecision(gtboxes,gtlabels,boxes[v:],labels[v:],threshold)
res.append([p,r])
res.sort(key=lambda x:x[1])
#print("mAP:res: {}".format(res))
min_r = res[0][1]
max_r = res[-1][1]
logging.debug("mAP: max r {}, min r {}".format(max_r,min_r))
if max_r-min_r<1.0:
p,r = getPrecision(gtboxes,gtlabels,boxes,labels,threshold)
res = [[p,r],[p,r]]
if min_r > 1e-2:
res = np.concatenate([np.array([[res[0][0],0.]]),res],axis=0)
if max_r <100.0-1e-2:
if max_r+10.<100.0:
res = np.concatenate([res,np.array([[0.,max_r+10.],[0.,100.]])])
else:
res = np.concatenate([res,np.array([[0.,max_r+10.]])])
res = np.array(res)
res = res.transpose()
precisions = res[0]
recall = res[1]
new_r = np.arange(0.,100.01,10.).tolist()
new_p = []
for r in new_r:
new_p.append(np.interp(r,recall,precisions))
precisions = np.array(new_p)
if precisions.shape[0]>1:
for i in range(precisions.shape[0]-1):
precisions[i] = np.max(precisions[i+1:])
return np.mean(precisions)
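# Usage sketch (same hypothetical toy data as above): one perfect detection
# gives precision = recall = 100 at every operating point, so mAP is 100.0.
#   >>> getmAP(gt, np.array([1]), gt.copy(), np.array([1]))
#   100.0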
def getRecall(gtboxes,gtlabels,boxes,labels,threshold=0.5):
gt_shape = gtboxes.shape
#indicates whether some predicted box matched this ground-truth box
gt_mask = np.zeros([gt_shape[0]],dtype=np.int32)
boxes_shape = boxes.shape
#indicates whether some ground-truth box matched this predicted box
boxes_mask = np.zeros(boxes_shape[0],dtype=np.int32)
gt_size = gtlabels.shape[0]
boxes_size = labels.shape[0]
for i in range(gt_size):
max_index = -1
max_jaccard = 0.0
#iterate over all boxes to find the one with the maximum Jaccard overlap with the current ground-truth box
for j in range(boxes_size):
if gtlabels[i] != labels[j] or boxes_mask[j] != 0:
continue
jaccard = npod.box_jaccard(gtboxes[i],boxes[j])
if jaccard>threshold and jaccard > max_jaccard:
max_jaccard = jaccard
max_index = j
if max_index < 0:
continue
gt_mask[i] = 1
boxes_mask[max_index] = 1
correct_num = np.sum(gt_mask)
total_num = gt_size
if 0 == total_num:
return 100.
return 100.*correct_num/total_num
def getPrecision(gtboxes,gtlabels,boxes,labels,threshold=0.5,auto_scale_threshold=True,ext_info=False):
'''
:param gtboxes: [N,4]
:param gtlabels: [N]
:param boxes: [M,4]
:param labels: [M]
:param threshold: nms_threshold,float
:return: precision,recall float
'''
if not isinstance(gtboxes,np.ndarray):
gtboxes = np.array(gtboxes)
if not isinstance(gtlabels,np.ndarray):
gtlabels = np.array(gtlabels)
gt_shape = gtboxes.shape
#indicates whether some predicted box matched this ground-truth box
gt_mask = np.zeros([gt_shape[0]],dtype=np.int32)
boxes_shape = boxes.shape
#indicates whether some ground-truth box matched this predicted box
boxes_mask = np.zeros(boxes_shape[0],dtype=np.int32)
gt_size = gtlabels.shape[0]
boxes_size = labels.shape[0]
MIN_VOL = 0.005
#print(">>>>",gtboxes,gtlabels)
for i in range(gt_size):
max_index = -1
max_jaccard = 0.0
t_threshold = threshold
if auto_scale_threshold:
#print(i,gtboxes,gtlabels)
vol = npod.box_vol(gtboxes[i])
if vol < MIN_VOL:
t_threshold = vol*threshold/MIN_VOL
#iterate over all boxes to find the one with the maximum Jaccard overlap with the current ground-truth box
for j in range(boxes_size):
if gtlabels[i] != labels[j] or boxes_mask[j] != 0:
continue
jaccard = npod.box_jaccard(gtboxes[i],boxes[j])
if jaccard>t_threshold and jaccard > max_jaccard:
max_jaccard = jaccard
max_index = j
if max_index < 0:
continue
gt_mask[i] = 1
boxes_mask[max_index] = 1
correct_num = np.sum(gt_mask)
recall = __safe_persent(correct_num,gt_size)
precision = __safe_persent(correct_num,boxes_size)
P_v = gt_size
TP_v = correct_num
FP_v = boxes_size-correct_num
if ext_info:
gt_label_list = []
for i in range(gt_mask.shape[0]):
if gt_mask[i] != 1:
gt_label_list.append(gtlabels[i])
pred_label_list = []
for i in range(boxes_size):
if boxes_mask[i] != 1:
pred_label_list.append(labels[i])
return precision,recall,gt_label_list,pred_label_list,TP_v,FP_v,P_v
else:
return precision,recall
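# Usage sketch (hypothetical): one ground truth, two same-label predictions of
# which only the first overlaps -> precision 50.0, recall 100.0.
#   >>> boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.9, 0.9]])
#   >>> getPrecision(gt, np.array([1]), boxes, np.array([1, 1]))
#   (50.0, 100.0)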
class PrecisionAndRecall:
def __init__(self,threshold=0.5,num_classes=90,label_trans=None,*args,**kwargs):
self.threshold = threshold
self.gtboxes = []
self.gtlabels = []
self.boxes = []
self.labels = []
self.precision = None
self.recall = None
self.total_test_nr = 0
self.num_classes = num_classes
self.label_trans = label_trans
def __call__(self, gtboxes,gtlabels,boxes,labels,probability=None,img_size=[512,512],
gtmasks=None,
masks=None,is_crowd=None,use_relative_coord=True):
if self.label_trans is not None:
gtlabels = self.label_trans(gtlabels)
labels = self.label_trans(labels)
if gtboxes.shape[0]>0:
self.gtboxes.append(gtboxes)
self.gtlabels.append(np.array(gtlabels)+self.total_test_nr*self.num_classes)
if boxes.shape[0]>0:
self.boxes.append(boxes)
self.labels.append(np.array(labels)+self.total_test_nr*self.num_classes)
self.total_test_nr += 1
def evaluate(self):
if self.total_test_nr==0 or len(self.boxes)==0 or len(self.labels)==0:
self.precision,self.recall = 0,0
return
gtboxes = np.concatenate(self.gtboxes,axis=0)
gtlabels = np.concatenate(self.gtlabels,axis=0)
boxes = np.concatenate(self.boxes,axis=0)
labels = np.concatenate(self.labels,axis=0)
self.precision,self.recall = getPrecision(gtboxes, gtlabels, boxes, labels, threshold=self.threshold,
auto_scale_threshold=False, ext_info=False)
def show(self,name=""):
self.evaluate()
res = f"{name}: total test nr {self.total_test_nr}, precision {self.precision:.3f}, recall {self.recall:.3f}"
print(res)
def to_string(self):
return f"{self.precision:.3f}/{self.recall:.3f}({self.total_test_nr})"
class ROC:
def __init__(self,threshold=0.5,num_classes=90,label_trans=None,*args,**kwargs):
self.threshold = threshold
self.gtboxes = []
self.gtlabels = []
self.boxes = []
self.labels = []
self.probs = []
self.precision = None
self.recall = None
self.total_test_nr = 0
self.num_classes = num_classes
self.label_trans = label_trans
self.results = None
def __call__(self, gtboxes,gtlabels,boxes,labels,probability=None,img_size=[512,512],
gtmasks=None,
masks=None,is_crowd=None):
if self.label_trans is not None:
gtlabels = self.label_trans(gtlabels)
labels = self.label_trans(labels)
if gtboxes.shape[0]>0:
self.gtboxes.append(gtboxes)
self.gtlabels.append(np.array(gtlabels)+self.total_test_nr*self.num_classes)
if boxes.shape[0]>0:
self.boxes.append(boxes)
self.labels.append(np.array(labels)+self.total_test_nr*self.num_classes)
self.probs.append(np.array(probability))
self.total_test_nr += 1
def evaluate(self):
if self.total_test_nr==0 or len(self.boxes)==0 or len(self.labels)==0:
self.precision,self.recall = 0,0
return
gtboxes = np.concatenate(self.gtboxes,axis=0)
gtlabels = np.concatenate(self.gtlabels,axis=0)
boxes = np.concatenate(self.boxes,axis=0)
labels = np.concatenate(self.labels,axis=0)
probs = np.concatenate(self.probs,axis=0)
self.results = []
for p in np.arange(0,1,0.05):
mask = np.greater(probs,p)
t_boxes = boxes[mask]
t_labels = labels[mask]
precision, recall, gt_label_list, pred_label_list, TP_v, FP_v, P_v = \
getPrecision(gtboxes, gtlabels, t_boxes, t_labels, threshold=self.threshold,
auto_scale_threshold=False, ext_info=True)
self.results.append([p,precision,recall])
def show(self,name=""):
print(self.to_string())
def to_string(self):
self.evaluate()
res = ""
if self.results is None or len(self.results) == 0:
return res
for p, precision, recall in self.results:
res += f"{p:.3f},{precision:.3f},{recall:.3f};\n"
return res
class ModelPerformance:
def __init__(self,threshold,no_mAP=False,no_F1=False):
self.total_map = 0.
self.total_recall = 0.
self.total_precision = 0.
self.total_F1 = 0.
self.threshold = threshold
self.test_nr = 0
self.no_mAP=no_mAP
self.no_F1 = no_F1
def __call__(self, gtboxes,gtlabels,boxes,labels,probability=None):
gtboxes = copy.deepcopy(np.array(gtboxes))
gtlabels = copy.deepcopy(np.array(gtlabels))
boxes = copy.deepcopy(boxes)
labels = copy.deepcopy(labels)
if probability is not None:
probability = copy.deepcopy(probability)
if self.no_mAP:
ap = 0.
else:
ap = getmAP(gtboxes, gtlabels, boxes, labels, probability=probability,threshold=self.threshold)
rc = getRecall(gtboxes, gtlabels, boxes, labels, self.threshold)
if self.no_F1:
f1 = 0.
else:
f1 = getF1(gtboxes, gtlabels, boxes, labels, self.threshold)
pc,_ = getPrecision(gtboxes, gtlabels, boxes, labels, self.threshold)
self.total_map += ap
self.total_recall += rc
self.total_precision += pc
self.total_F1 += f1
self.test_nr += 1
return ap,rc,pc,f1
@staticmethod
def safe_div(v0,v1):
if math.fabs(v1)<1e-8:
return 0.
return v0/v1
def __getattr__(self, item):
if item=="mAP":
return self.safe_div(self.total_map,self.test_nr)
elif item =="recall":
return self.safe_div(self.total_recall,self.test_nr)
elif item=="precision":
return self.safe_div(self.total_precision,self.test_nr)
class GeneralCOCOEvaluation(object):
def __init__(self,categories_list=None,num_classes=None,mask_on=False,label_trans=None):
if categories_list is None:
self.categories_list = [{"id":x+1,"name":str(x+1)} for x in range(num_classes)]
else:
self.categories_list = categories_list
if not mask_on:
self.coco_evaluator = coco_evaluation.CocoDetectionEvaluator(
self.categories_list,include_metrics_per_category=False)
else:
self.coco_evaluator = coco_evaluation.CocoMaskEvaluator(
self.categories_list,include_metrics_per_category=False)
self.label_trans = label_trans
self.image_id = 0
self.cached_values = {}
'''
gtboxes:[N,4]
gtlabels:[N]
img_size:[H,W]
gtmasks:[N,H,W]
'''
def __call__(self, gtboxes,gtlabels,boxes,labels,probability=None,img_size=[512,512],
gtmasks=None,
masks=None,is_crowd=None,use_relative_coord=True):
if probability is None:
probability = np.ones_like(labels,dtype=np.float32)
if not isinstance(gtboxes,np.ndarray):
gtboxes = np.array(gtboxes)
if not isinstance(gtlabels,np.ndarray):
gtlabels = np.array(gtlabels)
if not isinstance(boxes,np.ndarray):
boxes = np.array(boxes)
if not isinstance(labels,np.ndarray):
labels = np.array(labels)
import open3d as o3d
import numpy as np
from . import convert
from . import sanity
def create_camera_center_line(Ts, color=np.array([1, 0, 0])):
num_nodes = len(Ts)
camera_centers = [convert.T_to_C(T) for T in Ts]
ls = o3d.geometry.LineSet()
lines = [[x, x + 1] for x in range(num_nodes - 1)]
colors = np.tile(color, (len(lines), 1))
ls.points = o3d.utility.Vector3dVector(camera_centers)
ls.lines = o3d.utility.Vector2iVector(lines)
ls.colors = o3d.utility.Vector3dVector(colors)
return ls
def create_camera_frame(T, size=0.1, color=[0, 0, 1]):
R, t = T[:3, :3], T[:3, 3]
C0 = convert.R_t_to_C(R, t).ravel()
C1 = (C0 + R.T.dot(
np.array([[-size], [-size], [3 * size]], dtype=np.float32)).ravel())
C2 = (C0 + R.T.dot(
np.array([[-size], [+size], [3 * size]], dtype=np.float32)).ravel())
C3 = (C0 + R.T.dot(
np.array([[+size], [+size], [3 * size]], dtype=np.float32)).ravel())
C4 = (C0 + R.T.dot(
np.array([[+size], [-size], [3 * size]], dtype=np.float32)).ravel())
ls = o3d.geometry.LineSet()
points = np.array([C0, C1, C2, C3, C4])
lines = [[0, 1], [0, 2], [0, 3], [0, 4], [1, 2], [2, 3], [3, 4], [4, 1]]
colors = np.tile(color, (len(lines), 1))
ls.points = o3d.utility.Vector3dVector(points)
ls.lines = o3d.utility.Vector2iVector(lines)
ls.colors = o3d.utility.Vector3dVector(colors)
return ls
def create_camera_frames(Ts,
size=0.1,
color=[0, 0, 1],
start_color=[0, 1, 0],
end_color=[1, 0, 0],
center_line=True,
center_line_color=[1, 0, 0]):
camera_frames = o3d.geometry.LineSet()
for index, T in enumerate(Ts):
if index == 0:
frame_color = start_color
elif index == len(Ts) - 1:
frame_color = end_color
else:
frame_color = color
camera_frame = create_camera_frame(T, size=size, color=frame_color)
camera_frames += camera_frame
if len(Ts) > 1 and center_line:
center_line = create_camera_center_line(Ts, color=center_line_color)
camera_frames += center_line
return camera_frames
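# Usage sketch (Ts is an assumed list of 4x4 camera extrinsic matrices):
#   frames = create_camera_frames(Ts, size=0.05)
#   o3d.visualization.draw_geometries([frames])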
def create_camera_center_ray(K, T, size=0.1, color=[0, 0, 1]):
"""
K: 3x3
T: 4x4
Returns a linset of two points. The line starts the camera center and passes
through the center of the image.
"""
sanity.assert_T(T)
sanity.assert_K(K)
# Pick point at the center of the image
# Assumes that the camera offset is exactly at the center of the image.
col = K[0, 2]
row = K[1, 2]
points = np.array([
[col, row, 1],
])
# Transform to camera space
points = (np.linalg.inv(K) @ points.T).T
# Normalize to have 1 distance
points = points / np.linalg.norm(points, axis=1, keepdims=True)
#!/usr/bin/env python3
#<NAME> 2019
import numpy as np
import matplotlib
import os
import pwd
os.getlogin = lambda: pwd.getpwuid(os.getuid())[0]
if os.getlogin() != 'oliver':
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import pickle
import lightkurve as lk
import pandas as pd
import pystan
import astropy.units as u
import sys
from astropy.units import cds
import corner
import glob
import time
from astropy.io import ascii
timestr = time.strftime("%m%d-%H%M")
import argparse
parser = argparse.ArgumentParser(description='Run our PyStan model')
parser.add_argument('iters', type=int, help='Number of MCMC iterations in PyStan.')
parser.add_argument('idx',type=int,help='Index on the kiclist')
args = parser.parse_args()
__iter__ = args.iters
def create_model(overwrite=True):
malatium = '''
functions{
vector lorentzian(real loc, int l, int m, vector f, real eps, real H, real w, real nus){
return (eps * H) ./ (1 + (4/w^2) * square(f - loc + m*nus));
}
real harvey(real f, real a, real b, real c){
return 0.9*a^2/b/(1.0 + (f/b)^c);
}
real apod(real f, real nyq){
real x = 3.14 / 2.0 * f / nyq;
return (sin(x) / x)^2;
}
real background(real f, real a, real b, real c, real d, real j, real k,
real numax, real white, real nyq, real scale){
return (apod(f, nyq) * scale
* (harvey(f, a, b, 4.0)
+ harvey(f, c, d, 4.0)
+ harvey(f, j, k, 2.0))
+ white);
}
}
data{
int N; // Number of data points
int M; // Number of modes
vector[N] f; // Frequency
vector[N] p; // Power
real pr_locs[M]; // Mode locations (this will have to change for multiple n modes)
real e_locs[M]; // Uncertainty on the mode locations
int ids[M]; // The ID's of the modes
real rho; // The length scale of the GP Gamma prior
vector[10] pr_phi; // The initial guesses for the background parameters
cov_matrix[10] sigphi; // The covariance of the background parameters
}
transformed data{
matrix[10,10] L_sigphi = cholesky_decompose(sigphi);
}
parameters{
real logAmp[M]; // Mode amplitude in log space
vector[M] logGamma; // Mode linewidth in log space
real locs[M]; // True mode locations
real<lower=0> vsini; // Sin of angle of inclination x rotational splitting
real<lower=0> vcosi; // Cos of angle of inclination x rotational splitting
real<lower=0.> alpha; // Spread on the squared exponential kernel
vector[10] phi; // The background parameters
}
transformed parameters{
real numax = 10^phi[7]; // Background parameters
real<lower=0> logac = phi[1] - phi[3]; // Background parameters
real<lower=0> logdb = phi[4] - phi[2]; // Background parameters
real H[M]; // Mode height
real w[M]; // Mode linewidth
real i; // Angle of inclination (rad)
real<lower=0> nus; // Rotational frequency splitting
matrix[M, M] gpG = cov_exp_quad(pr_locs, alpha, rho)
+diag_matrix(rep_vector(1e-10, M));
matrix[M, M] LgpG = cholesky_decompose(gpG);
nus = sqrt(vsini^2 + vcosi^2); //Calculate the splitting
i = acos(vcosi / nus); // Calculate the inclination
for (m in 1:M){
w[m] = 10^logGamma[m]; // Transform log linewidth to linewidth
H[m] = 10^logAmp[m] / pi() / w[m]; // Transform mode amplitude to mode height
}
}
model{
real a = 10^phi[1]; // Caculate the linear background parameters
real b = 10^phi[2];
real c = 10^phi[3];
real d = 10^phi[4];
real j = 10^phi[5];
real k = 10^phi[6];
vector[N] modes; // Our Model
matrix[4,4] eps; // Matrix of legendre polynomials
int l; // The radial degree
real nus_mu = 0.5; // Circumventing a Stan problem
eps = rep_matrix(1., 4, 4); // Calculate all the legendre polynomials for this i
eps[0+1,0+1] = 1.;
eps[1+1,0+1] = cos(i)^2;
eps[1+1,1+1] = 0.5 * sin(i)^2;
eps[2+1,0+1] = 0.25 * (3. * cos(i)^2 - 1.)^2;
eps[2+1,1+1] = (3./8.)*sin(2*i)^2;
eps[2+1,2+1] = (3./8.) * sin(i)^4;
eps[3+1,0+1] = (1./64.)*(5.*cos(3.*i) + 3.*cos(i))^2;
eps[3+1,1+1] = (3./64.)*(5.*cos(2.*i) + 3.)^2 * sin(i)^2;
eps[3+1,2+1] = (15./8.)*cos(i)^2 * sin(i)^4;
eps[3+1,3+1] = (5./16.)*sin(i)^6;
// Generating our model
for (n in 1:N){
modes[n] = background(f[n], a, b, c, d, j, k, numax, phi[8], phi[9], phi[10]);
}
for (mode in 1:M){ // Iterate over all modes passed in
l = ids[mode]; // Identify the Mode ID
for (m in -l:l){ // Iterate over all m in a given l
modes += lorentzian(locs[mode], l, m, f, eps[l+1,abs(m)+1], H[mode], w[mode], nus);
}
}
// Model drawn from a gamma distribution scaled to the model (Anderson+1990)
p ~ gamma(1., 1../modes);
//priors on the parameters
logAmp ~ normal(1.5, 1.);
locs ~ normal(pr_locs, e_locs);
nus_mu ~ normal(nus, 1.);
vsini ~ uniform(0,nus);
alpha ~ normal(0.3, .5);
logGamma ~ multi_normal_cholesky(rep_vector(0., M), LgpG);
phi ~ multi_normal_cholesky(pr_phi, L_sigphi);
logac ~ lognormal(1., 1.);
logdb ~ lognormal(1., 1.);
}
'''
model_path = 'malatium.pkl'
if overwrite:
print('Updating Stan model')
sm = pystan.StanModel(model_code = malatium, model_name='malatium')
pkl_file = open(model_path, 'wb')
pickle.dump(sm, pkl_file)
pkl_file.close()
if os.path.isfile(model_path):
print('Reading in Stan model')
sm = pickle.load(open(model_path, 'rb'))
else:
print('Saving Stan Model')
sm = pystan.StanModel(model_code = malatium, model_name='malatium')
pkl_file = open(model_path, 'wb')
pickle.dump(sm, pkl_file)
pkl_file.close()
class run_stan:
def __init__(self, data, init, dir):
'''Core PyStan class.
Input __init__:
data (dict): Dictionary of the data in pystan format.
init (dict): Dictionary of initial guesses in pystan format.
dir (str): Output directory for the diagnostic plots.
'''
self.data = data
self.init = init
self.dir = dir
def read_stan(self):
'''Reads the existing stanmodel'''
model_path = 'malatium.pkl'
if os.path.isfile(model_path):
sm = pickle.load(open(model_path, 'rb'))
else:
print('No stan model found')
create_model(overwrite=True)
sm = pickle.load(open(model_path, 'rb'))
return sm
def run_stan(self):
'''Runs PyStan'''
sm = self.read_stan()
fit = sm.sampling(data = self.data,
iter= __iter__, chains=4, seed=1895,
init = [self.init, self.init, self.init, self.init])
return fit
def out_corner(self, fit):
labels=['vsini','vcosi','i','nus', 'alpha']
verbose = [r'$\nu_{\rm s}\sin(i)$',r'$\nu_{\rm s}\cos(i)$',r'$i$',
r'$\nu_{\rm s}$', r'$\alpha$']
chain = np.array([fit[label] for label in labels])
corner.corner(chain.T, labels=verbose, quantiles=[0.16, 0.5, 0.84],
show_titles=True)
plt.savefig(self.dir+'corner.png')
plt.close('all')
def out_stanplot(self, fit):
fit.plot(pars=['vsini','vcosi','i','nus','H','logAmp','logGamma','alpha'])
plt.savefig(self.dir+'stanplot.png')
plt.close('all')
def _get_epsilon(self, i, l, m):
#I use the prescriptions from Gizon & Solanki 2003 and Handberg & Campante 2012
if l == 0:
return 1
if l == 1:
if m == 0:
return np.cos(i)**2
if np.abs(m) == 1:
return 0.5 * np.sin(i)**2
if l == 2:
if m == 0:
return 0.25 * (3 * np.cos(i)**2 - 1)**2
if np.abs(m) ==1:
return (3/8)*np.sin(2*i)**2
if np.abs(m) == 2:
return (3/8) * np.sin(i)**4
if l == 3:
if m == 0:
return (1/64)*(5*np.cos(3*i) + 3*np.cos(i))**2
if np.abs(m) == 1:
return (3/64)*(5*np.cos(2*i) + 3)**2 * np.sin(i)**2
if np.abs(m) == 2:
return (15/8) * np.cos(i)**2 * np.sin(i)**4
if np.abs(m) == 3:
return (5/16)*np.sin(i)**6
def _lorentzian(self, f, l, m, loc, i, H, w, nus):
eps = self._get_epsilon(i,l,m)
model = eps * H / (1 + (4/w**2)*(f - loc + m * nus)**2)
return model
def out_modelplot(self, fit):
model = np.ones(len(self.data['f']))
f = self.data['f']
i = np.median(fit['i'])
nus = np.median(fit['nus'])
for mode in range(len(self.data['ids'])):
l = self.data['ids'][mode]
for m in range(-l, l+1):
loc = np.median(fit['locs'].T[mode])
H = np.median(fit['H'].T[mode])
w = np.median(fit['w'].T[mode])
model += self._lorentzian(f, l, m, loc, i, H, w, nus)
fitlocs = np.median(fit['locs'],axis=0)
pg = lk.Periodogram(self.data['f']*u.microhertz, self.data['p']*(cds.ppm**2/u.microhertz))
ax = pg.plot(alpha=.5, label='Data')
plt.scatter(fitlocs, [15]*len(fitlocs),c='k',s=25, label='fit locs')
plt.scatter(self.data['pr_locs'], [15]*len(self.data['pr_locs']),c='r',s=5, label='true locs')
plt.plot(self.data['f'], model, linewidth=1, label='Model')
plt.legend()
plt.savefig(self.dir+'modelplot.png')
plt.close('all')
def _kernel(self, x, y, p):
return p[0]**2 * np.exp(-0.5 * np.subtract.outer(x, y)**2 / p[1]**2)
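# _predict below implements standard GP posterior conditioning
# (Rasmussen & Williams 2006, Eqs. 2.23-2.24):
#   y* = c + B A^{-1} (y_1 - a),  Sigma* = C - B A^{-1} B^T
# with A = K(t_1, t_1) + diag(y_v), B = K(t_1, t_2)^T, C = K(t_2, t_2).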
def _predict(self, t_2, t_1, theta, a, c, y_1, y_v):
B = self._kernel(t_1, t_2, theta).T
A = self._kernel(t_1, t_1, theta).T + np.diag(y_v)
C = self._kernel(t_2, t_2, theta).T
y = c + np.dot(np.dot(B, np.linalg.inv(A)), (y_1 - a))
Sigma = C - np.dot(np.dot(B, np.linalg.inv(A)),B.T)
y_pred = y
sigma_new = np.sqrt(np.diagonal(Sigma))
return y_pred, sigma_new
def _plot_GP(self, ax, t_1, t_2, y_1, s, y_pred, sigmas, label='Observation'):
ax.fill_between(t_2, y_pred-sigmas, y_pred+sigmas, alpha=.5, color='#8d44ad')
ax.plot(t_2, y_pred, c='k')
ax.errorbar(t_1, y_1, yerr=s, fmt='o', capsize=0, label=label)
ax.legend(fontsize=15)
ax.set_ylabel(r'Linewidth [$\mu Hz$]', fontsize=20)
ax.set_xlabel(r'Frequency [$\mu Hz$]', fontsize=20)
ax.legend(fontsize=20)
return ax
def out_gpplot(self, fit):
ws = np.median(fit['logGamma'], axis=0)
ws_std = np.std(fit['logGamma'],axis=0)
flocs = np.median(fit['locs'], axis=0)
alpha = np.median(fit['alpha'])
rho = self.data['rho']
npts = 500
a = np.zeros(len(flocs))
c = np.zeros(npts)
flocs2 = np.linspace(np.min(flocs), np.max(flocs), npts)
theta = [alpha, rho]
ws_pred, sigmas = self._predict(flocs2, flocs, theta, a, c, ws, ws_std**2)
fig, ax = plt.subplots(figsize=(12,8))
ax = self._plot_GP(ax, flocs, flocs2, ws, ws_std, ws_pred, sigmas)
ax.set_xlim(flocs.min()-5*.3, flocs.max()+5*.3)
plt.savefig(self.dir+'gpplot.png')
plt.close('all')
def _harvey(self, f, a, b, c):
harvey = 0.9*a**2/b/(1.0 + (f/b)**c);
return harvey
def _get_apodization(self, freqs, nyquist):
x = (np.pi * freqs) / (2 * nyquist)
return (np.sin(x)/x)**2
def _get_background(self, f, a, b, c, d, j, k, white, numax, scale, nyq):
background = np.zeros(len(f))
background += self._get_apodization(f, nyq) * scale\
* (self._harvey(f, a, b, 4.) + self._harvey(f, c, d, 4.) + self._harvey(f, j, k, 2.))\
+ white
return background
def out_backplot(self, fit):
res = np.median(fit['phi'],axis=0)
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains some math utils that are used in the chemenv package.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "<NAME>"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Feb 20, 2016"
from math import sqrt
import numpy as np
from scipy.special import erf
from functools import reduce
##############################################################
### cartesian product of lists ##################################
##############################################################
def _append_es2sequences(sequences, es):
result = []
if not sequences:
for e in es:
result.append([e])
else:
for e in es:
result += [seq+[e] for seq in sequences]
return result
def _cartesian_product(lists):
"""
given a list of lists,
returns all the possible combinations taking one element from each list
The list does not have to be of equal length
"""
return reduce(_append_es2sequences, lists, [])
def prime_factors(n):
"""Lists prime factors of a given natural integer, from greatest to smallest
:param n: Natural integer
:rtype : list of all prime factors of the given natural n
"""
i = 2
while i <= sqrt(n):
if n % i == 0:
l = prime_factors(n // i)  # floor division keeps the factors integral on Python 3
l.append(i)
return l
i += 1
return [n] # n is prime
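# Example: prime_factors(12) returns [3, 2, 2] (greatest factor first).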
def _factor_generator(n):
"""
From a given natural integer, returns the prime factors and their multiplicity
:param n: Natural integer
:return:
"""
p = prime_factors(n)
factors = {}
for p1 in p:
try:
factors[p1] += 1
except KeyError:
factors[p1] = 1
return factors
def divisors(n):
"""
From a given natural integer, returns the list of divisors in ascending order
:param n: Natural integer
:return: List of divisors of n in ascending order
"""
factors = _factor_generator(n)
_divisors = []
listexponents = [[k**x for x in range(0, factors[k]+1)] for k in list(factors.keys())]
listfactors = _cartesian_product(listexponents)
for f in listfactors:
_divisors.append(reduce(lambda x, y: x*y, f, 1))
_divisors.sort()
return _divisors
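# Example: divisors(12) -> [1, 2, 3, 4, 6, 12]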
def get_center_of_arc(p1, p2, radius):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
dd = np.sqrt(dx*dx + dy*dy)
radical = np.power((radius / dd), 2) - 0.25
if radical < 0:
raise ValueError("Impossible to find center of arc because the arc is ill-defined")
tt = np.sqrt(radical)
if radius > 0:
tt = -tt
return (p1[0] + p2[0]) / 2 - tt * dy, (p1[1] + p2[1]) / 2 + tt * dx
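# Example: the radius-1 circle through p1 = (0, 0) and p2 = (2, 0) is centered
# at (1.0, 0.0), so get_center_of_arc((0, 0), (2, 0), 1.0) returns (1.0, 0.0).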
def get_linearly_independent_vectors(vectors_list):
independent_vectors_list = []
for vector in vectors_list:
if np.any(vector != 0):
if len(independent_vectors_list) == 0:
independent_vectors_list.append(np.array(vector))
elif len(independent_vectors_list) == 1:
rank = np.linalg.matrix_rank(np.array([independent_vectors_list[0], vector, [0, 0, 0]]))
if rank == 2:
independent_vectors_list.append(np.array(vector))
elif len(independent_vectors_list) == 2:
mm = np.array([independent_vectors_list[0], independent_vectors_list[1], vector])
if np.linalg.det(mm) != 0:
independent_vectors_list.append(np.array(vector))
if len(independent_vectors_list) == 3:
break
return independent_vectors_list
def scale_and_clamp(xx, edge0, edge1, clamp0, clamp1):
return np.clip((xx-edge0) / (edge1-edge0), clamp0, clamp1)
#Step function based on the cumulative distribution function of the normal law
def normal_cdf_step(xx, mean, scale):
return 0.5 * (1.0 + erf( (xx-mean) / (np.sqrt(2.0) * scale)) )
#SMOOTH STEP FUNCTIONS
#Set of smooth step functions that allow to smoothly go from y = 0.0 (1.0) to y = 1.0 (0.0) by changing x
# from 0.0 to 1.0 respectively when inverse is False (True).
# (except if edges is given in which case a the values are first scaled and clamped to the interval given by edges)
#The derivative at x = 0.0 and x = 1.0 have to be 0.0
def smoothstep(xx, edges=None, inverse=False):
if edges is None:
xx_clipped = np.clip(xx, 0.0, 1.0)
if inverse:
return 1.0-xx_clipped*xx_clipped*(3.0-2.0*xx_clipped)
else:
return xx_clipped*xx_clipped*(3.0-2.0*xx_clipped)
else:
xx_scaled_and_clamped = scale_and_clamp(xx, edges[0], edges[1], 0.0, 1.0)
return smoothstep(xx_scaled_and_clamped, inverse=inverse)
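# Worked values: smoothstep(0.5) == 0.5, smoothstep(0.25) == 0.15625, and with
# inverse=True the curve is mirrored: smoothstep(0.25, inverse=True) == 0.84375.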
def smootherstep(xx, edges=None, inverse=False):
if edges is None:
xx_clipped = np.clip(xx, 0.0, 1.0)
if inverse:
return 1.0-xx_clipped*xx_clipped*xx_clipped*(xx_clipped*(xx_clipped*6-15)+10)
else:
return xx_clipped*xx_clipped*xx_clipped*(xx_clipped*(xx_clipped*6-15)+10)
else:
xx_scaled_and_clamped = scale_and_clamp(xx, edges[0], edges[1], 0.0, 1.0)
return smootherstep(xx_scaled_and_clamped, inverse=inverse)
def cosinus_step(xx, edges=None, inverse=False):
if edges is None:
xx_clipped = np.clip(xx, 0.0, 1.0)
if inverse:
return (np.cos(xx_clipped*np.pi) + 1.0) / 2.0
else:
return 1.0-(np.cos(xx_clipped*np.pi) + 1.0) / 2.0
else:
xx_scaled_and_clamped = scale_and_clamp(xx, edges[0], edges[1], 0.0, 1.0)
return cosinus_step(xx_scaled_and_clamped, inverse=inverse)
def power3_step(xx, edges=None, inverse=False):
return smoothstep(xx, edges=edges, inverse=inverse)
def powern_parts_step(xx, edges=None, inverse=False, nn=2):
if edges is None:
aa = np.power(0.5, 1.0-nn)
xx_clipped = np.clip(xx, 0.0, 1.0)
if np.mod(nn, 2) == 0:
if inverse:
return 1.0-np.where(xx_clipped < 0.5, aa*np.power(xx_clipped, nn), 1.0-aa*np.power(xx_clipped-1.0, nn))
else:
return np.where(xx_clipped < 0.5, aa*np.power(xx_clipped, nn), 1.0-aa*np.power(xx_clipped-1.0, nn))
else:
if inverse:
return 1.0-np.where(xx_clipped < 0.5, aa*np.power(xx_clipped, nn), 1.0+aa*np.power(xx_clipped-1.0, nn))
else:
return np.where(xx_clipped < 0.5, aa*np.power(xx_clipped, nn), 1.0+aa*np.power(xx_clipped-1.0, nn))
else:
xx_scaled_and_clamped = scale_and_clamp(xx, edges[0], edges[1], 0.0, 1.0)
return powern_parts_step(xx_scaled_and_clamped, inverse=inverse, nn=nn)
#FINITE DECREASING FUNCTIONS
#Set of decreasing functions that allow to smoothly go from y = 1.0 to y = 0.0 by changing x from 0.0 to 1.0
#The derivative at x = 1.0 has to be 0.0
def powern_decreasing(xx, edges=None, nn=2):
if edges is None:
aa = 1.0/np.power(-1.0, nn)
return aa * np.power(xx-1.0, nn)
else:
xx_scaled_and_clamped = scale_and_clamp(xx, edges[0], edges[1], 0.0, 1.0)
return powern_decreasing(xx_scaled_and_clamped, nn=nn)
def power2_decreasing_exp(xx, edges=None, alpha=1.0):
if edges is None:
aa = 1.0/np.power(-1.0, 2)
return aa * np.power(xx-1.0, 2) * np.exp(-alpha*xx)
else:
xx_scaled_and_clamped = scale_and_clamp(xx, edges[0], edges[1], 0.0, 1.0)
return power2_decreasing_exp(xx_scaled_and_clamped, alpha=alpha)
"""
Functions for making a consistent dataset with fixed and free variables as is expected in our dataset.
"""
import logging
import sys
from itertools import chain
from pathlib import Path
import numpy as np
import pandas as pd
import sympy
from src.util import get_free_fluxes
RT = 0.008314 * 298.15
logger = logging.getLogger(__name__)
def namevec(name, vec):
return [f"{name}_{i}" for i in range(len(vec))]
def calc_internal_fluxes(s_gamma, e, b, dgf, c):
""" From a set of parameters, calculate the fluxes"""
dgr = s_gamma.T @ (dgf + RT * np.log(c))
return dgr.multiply(b * e, axis=0)
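# In matrix form the above is v_int = (b * e) elementwise-times dGr, where
# dGr = S_gamma^T (dGf + RT ln c): each internal flux is proportional to the
# thermodynamic driving force of its reaction.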
def get_s_x(S, b, e, exchange_rxns):
""" Get the modified s matrix for calculating the free and fixed fluxes
"""
n_exchange = exchange_rxns.sum()
n_mets, n_rxns = S.shape
s_x = np.zeros((n_rxns, n_exchange + n_mets))
s_x[:n_exchange, :n_exchange] = np.identity(n_exchange)
s_x[n_exchange:, n_exchange:] = S.loc[:, ~exchange_rxns].T.mul(b * e, axis=0)
return s_x
def get_s_c(S, b, e, exchange_rxns):
s_x = get_s_x(S, b, e, exchange_rxns)
return S.values @ s_x
def calc_fixed(S, b, e, c_free, t_free, dgf, free_vars):
"""
Calculate all fixed parameters from the free parameters
"""
num_mets, num_rxns = S.shape
exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
# Check that they are at the start of the reactions
assert not any(exchange_rxns[exchange_rxns.sum():]), "Exchange reactions must come first"
# Determine the s_c and s_x matrices
s_c = get_s_c(S, b, e, exchange_rxns)
s_x = get_s_x(S, b, e, exchange_rxns)
# More useful numbers
num_exchange = exchange_rxns.sum()
num_x = num_exchange + num_mets
# Define some masks for the different parts of the x vector
conc_x = np.full(num_x, False)
conc_x[num_exchange:] = True
free_c_mask = free_vars[conc_x]
fixed_c_mask = ~free_vars[conc_x]
# Calculate the rhs of the equation (from the free vars)
x = np.full(num_x, np.NAN)
assert len(c_free) == free_c_mask.sum(), "The number of free c must be correct"
assert len(t_free) == free_vars[~conc_x].sum(), "The number of free t must be correct"
x[conc_x & free_vars] = dgf[free_c_mask] + RT * c_free
x[~conc_x & free_vars] = t_free
rhs = -s_c[:, free_vars] @ x[free_vars]
# Determine the corresponding fixed variables
x[~free_vars] = np.linalg.solve(s_c[:, ~free_vars], rhs)
# Back-calculate all the fixed variables
c = np.zeros(num_mets)
c[free_c_mask] = c_free # The concentration vars of the fixed variables
c[fixed_c_mask] = (x[~free_vars & conc_x] - dgf[fixed_c_mask]) / RT
# Calculate the fluxes
# Exchange fluxes
v = s_x @ x
check_fluxes(S, b, c, conc_x, dgf, e, exchange_rxns, num_rxns, s_c, s_x, x)
return v, c
def check_fluxes(S, b, c, conc_x, dgf, e, exchange_rxns, num_rxns, s_c, s_x, x):
# Check the s_x matrix
assert all(S @ s_x @ x < 1e-10), "All conc changes should be approximately 0"
# Check the s_c matrix
assert all(s_c @ x < 1e-10), "All conc changes should be approximately 0"
# Check the standard calculation
test_v = np.zeros(num_rxns)
dgr = S.T[~exchange_rxns] @ (dgf + RT * c)
test_v[~exchange_rxns] = dgr * b * e
test_v[exchange_rxns] = x[~conc_x]
assert all(S @ test_v < 1e-10)
def find_params(temp_dir):
""" Make a dataframe filled with samples of model parameters that have reasonable values"""
# Now write the measurements to file
result_dir = temp_dir / "results"
S = pd.read_csv(temp_dir / "stoichiometry.csv", index_col=0)
exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
# Get the free and fixed fluxes
n_internal = (~exchange_rxns).sum()
s_c = get_s_c(S, np.ones(n_internal), np.ones(n_internal), exchange_rxns)
free_vars, _ = get_free_fluxes(np.flip(s_c, axis=1))
free_vars = np.flip(free_vars)
dgf = pd.read_csv(temp_dir / "priors.csv", index_col=1)["loc"]
params = []
for i in range(1000):
c_free = np.exp(np.random.randn(1) * 2 - 8)
t_free = np.array([1])
b = np.exp(np.random.randn(n_internal) * 3 + 3)
e = np.exp(np.random.randn(n_internal) * 2 - 8)
v, c = calc_fixed(S, b, e, np.log(c_free), t_free, dgf, free_vars)
dgr = S.loc[:, ~exchange_rxns].T @ (dgf + RT * c)
# Check for reasonable values of all parameters (including the fixed params)
c_range = (c > -11) & (c < -5)
b_range = (np.log(b) > -4) & (np.log(b) < 8)
e_range = (np.log(e) > -11) & (np.log(e) < -5)  # bounds lost in the source; reconstructed by analogy with c_range above
"""
creation.py
--------------
Create meshes from primitives, or with operations.
"""
from .base import Trimesh
from .constants import log, tol
from .geometry import faces_to_edges, align_vectors, plane_transform
from . import util
from . import grouping
from . import triangles
from . import transformations as tf
import numpy as np
import collections
try:
# shapely is a soft dependency
from shapely.geometry import Polygon
from shapely.wkb import loads as load_wkb
except BaseException as E:
# shapely will sometimes raise OSErrors
# on import rather than just ImportError
from . import exceptions
# re-raise the exception when someone tries
# to use the module that they don't have
Polygon = exceptions.closure(E)
load_wkb = exceptions.closure(E)
def revolve(linestring,
angle=None,
sections=None,
transform=None,
**kwargs):
"""
Revolve a 2D line string around the 2D Y axis; in the result the 2D Y
axis points along the 3D Z axis.
This function handles the indexing complexity and is intended to be used
to create all radially symmetric primitives,
eventually including cylinders, annular cylinders, capsules, cones,
and UV spheres.
Note that if your linestring is closed, it needs to be counterclockwise
if you would like face winding and normals facing outwards.
Parameters
-------------
linestring : (n, 2) float
Lines in 2D which will be revolved
angle : None or float
Angle in radians to revolve curve by
sections : None or int
Number of sections result should have
If not specified default is 32 per revolution
transform : None or (4, 4) float
Transform to apply to mesh after construction
**kwargs : dict
Passed to Trimesh constructor
Returns
--------------
revolved : Trimesh
Mesh representing revolved result
"""
linestring = np.asanyarray(linestring, dtype=np.float64)
# linestring must be ordered 2D points
if len(linestring.shape) != 2 or linestring.shape[1] != 2:
raise ValueError('linestring must be 2D!')
if angle is None:
# default to closing the revolution
angle = np.pi * 2
closed = True
else:
# check passed angle value
closed = angle >= ((np.pi * 2) - 1e-8)
if sections is None:
# default to 32 sections for a full revolution
sections = int(angle / (np.pi * 2) * 32)
# change to face count
sections += 1
# create equally spaced angles
theta = np.linspace(0, angle, sections)
# 2D points around the revolution
points = np.column_stack((np.cos(theta), np.sin(theta)))
# how many points per slice
per = len(linestring)
# use the 2D X component as radius
radius = linestring[:, 0]
# use the 2D Y component as the height along revolution
height = linestring[:, 1]
# a lot of tiling to get our 3D vertices
vertices = np.column_stack((
np.tile(points, (1, per)).reshape((-1, 2)) *
np.tile(radius, len(points)).reshape((-1, 1)),
np.tile(height, len(points))))
if closed:
# should be a duplicate set of vertices
assert np.allclose(vertices[:per],
vertices[-per:])
# chop off duplicate vertices
vertices = vertices[:-per]
if transform is not None:
# apply transform to vertices
vertices = tf.transform_points(vertices, transform)
# how many slices of the pie
slices = len(theta) - 1
# start with a quad for every segment
# this is a superset which will then be reduced
quad = np.array([0, per, 1,
1, per, per + 1])
# stack the faces for a single slice of the revolution
single = np.tile(quad, per).reshape((-1, 3))
# `per` is basically the stride of the vertices
single += np.tile(np.arange(per), (2, 1)).T.reshape((-1, 1))
# remove any zero-area triangle
# this covers many cases without having to think too much
single = single[triangles.area(vertices[single]) > tol.merge]
# how much to offset each slice
# note arange multiplied by vertex stride
# but tiled by the number of faces we actually have
offset = np.tile(np.arange(slices) * per,
(len(single), 1)).T.reshape((-1, 1))
# stack a single slice into N slices
stacked = np.tile(single.ravel(), slices).reshape((-1, 3))
if tol.strict:
# make sure we didn't screw up stacking operation
assert np.allclose(stacked.reshape((-1, single.shape[0], 3)) - single, 0)
# offset stacked and wrap vertices
faces = (stacked + offset) % len(vertices)
# create the mesh from our vertices and faces
mesh = Trimesh(vertices=vertices, faces=faces,
**kwargs)
# strict checks run only in unit tests
if (tol.strict and
np.allclose(radius[[0, -1]], 0.0) or
np.allclose(linestring[0], linestring[-1])):
# if revolved curve starts and ends with zero radius
# it should really be a valid volume, unless the sign
# reversed on the input linestring
assert mesh.is_volume
return mesh
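# Usage sketch: revolving the open profile [[0, 0], [1, 0], [0, 1]] produces a
# closed cone of radius 1 and height 1 (cone() below does exactly this).
#   mesh = revolve([[0, 0], [1, 0], [0, 1]])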
def extrude_polygon(polygon,
height,
transform=None,
triangle_args=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh
Parameters
----------
polygon : shapely.geometry.Polygon
2D geometry to extrude
height : float
Distance to extrude polygon along Z
triangle_args : str or None
Passed to triangle
**kwargs:
passed to Trimesh
Returns
----------
mesh : trimesh.Trimesh
Resulting extrusion as watertight body
"""
# create a triangulation from the polygon
vertices, faces = triangulate_polygon(
polygon, triangle_args=triangle_args, **kwargs)
# extrude that triangulation along Z
mesh = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
transform=transform,
**kwargs)
return mesh
def sweep_polygon(polygon,
path,
angles=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh along an
arbitrary 3D path. Doesn't handle sharp curvature well.
Parameters
----------
polygon : shapely.geometry.Polygon
Profile to sweep along path
path : (n, 3) float
A path in 3D
angles : (n,) float
Optional rotation angle relative to prior vertex
at each vertex
Returns
-------
mesh : trimesh.Trimesh
Geometry of result
"""
path = np.asanyarray(path, dtype=np.float64)
if not util.is_shape(path, (-1, 3)):
raise ValueError('Path must be (n, 3)!')
# Extract 2D vertices and triangulation
verts_2d = np.array(polygon.exterior)[:-1]
base_verts_2d, faces_2d = triangulate_polygon(polygon, **kwargs)
n = len(verts_2d)
# Create basis for first planar polygon cap
x, y, z = util.generate_basis(path[0] - path[1])
tf_mat = np.ones((4, 4))
tf_mat[:3, :3] = np.c_[x, y, z]
tf_mat[:3, 3] = path[0]
# Compute 3D locations of those vertices
verts_3d = np.c_[verts_2d, np.zeros(n)]
verts_3d = tf.transform_points(verts_3d, tf_mat)
base_verts_3d = np.c_[base_verts_2d,
np.zeros(len(base_verts_2d))]
base_verts_3d = tf.transform_points(base_verts_3d,
tf_mat)
# keep matching sequence of vertices and 0- indexed faces
vertices = [base_verts_3d]
faces = [faces_2d]
# Compute plane normals for each turn --
# each turn induces a plane halfway between the two vectors
v1s = util.unitize(path[1:-1] - path[:-2])
v2s = util.unitize(path[1:-1] - path[2:])
norms = np.cross(np.cross(v1s, v2s), v1s + v2s)
norms[(norms == 0.0).all(1)] = v1s[(norms == 0.0).all(1)]
norms = util.unitize(norms)
final_v1 = util.unitize(path[-1] - path[-2])
norms = np.vstack((norms, final_v1))
v1s = np.vstack((v1s, final_v1))
# Create all side walls by projecting the 3d vertices into each plane
# in succession
for i in range(len(norms)):
verts_3d_prev = verts_3d
# Rotate if needed
if angles is not None:
tf_mat = tf.rotation_matrix(angles[i],
norms[i],
path[i])
verts_3d_prev = tf.transform_points(verts_3d_prev,
tf_mat)
# Project vertices onto plane in 3D
ds = np.einsum('ij,j->i', (path[i + 1] - verts_3d_prev), norms[i])
ds = ds / np.dot(v1s[i], norms[i])
verts_3d_new = np.einsum('i,j->ij', ds, v1s[i]) + verts_3d_prev
# Add to face and vertex lists
new_faces = [[i + n, (i + 1) % n, i] for i in range(n)]
new_faces.extend([[(i - 1) % n + n, i + n, i] for i in range(n)])
# save faces and vertices into a sequence
faces.append(np.array(new_faces))
vertices.append(np.vstack((verts_3d, verts_3d_new)))
verts_3d = verts_3d_new
# do the main stack operation from a sequence to (n,3) arrays
# doing one vstack provides a substantial speedup by
# avoiding a bunch of temporary allocations
vertices, faces = util.append_faces(vertices, faces)
# Create final cap
x, y, z = util.generate_basis(path[-1] - path[-2])
vecs = verts_3d - path[-1]
coords = np.c_[np.einsum('ij,j->i', vecs, x),
np.einsum('ij,j->i', vecs, y)]
base_verts_2d, faces_2d = triangulate_polygon(Polygon(coords))
base_verts_3d = (np.einsum('i,j->ij', base_verts_2d[:, 0], x) +
np.einsum('i,j->ij', base_verts_2d[:, 1], y)) + path[-1]
faces = np.vstack((faces, faces_2d + len(vertices)))
vertices = np.vstack((vertices, base_verts_3d))
return Trimesh(vertices, faces)
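# Usage sketch (assuming shapely is available): sweep a small counterclockwise
# square profile along a gently curving 3D path.
#   profile = Polygon([(0, 0), (0.1, 0), (0.1, 0.1), (0, 0.1)])
#   path = [[0, 0, 0], [0, 0, 1], [0, 0.5, 1.5]]
#   swept = sweep_polygon(profile, path)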
def extrude_triangulation(vertices,
faces,
height,
transform=None,
**kwargs):
"""
Extrude a 2D triangulation into a watertight mesh.
Parameters
----------
vertices : (n, 2) float
2D vertices
faces : (m, 3) int
Triangle indexes of vertices
height : float
Distance to extrude triangulation
**kwargs : dict
Passed to Trimesh constructor
Returns
---------
mesh : trimesh.Trimesh
Mesh created from extrusion
"""
vertices = np.asanyarray(vertices, dtype=np.float64)
height = float(height)
faces = np.asanyarray(faces, dtype=np.int64)
if not util.is_shape(vertices, (-1, 2)):
raise ValueError('Vertices must be (n,2)')
if not util.is_shape(faces, (-1, 3)):
raise ValueError('Faces must be (n,3)')
if np.abs(height) < tol.merge:
raise ValueError('Height must be nonzero!')
# make sure triangulation winding is pointing up
normal_test = triangles.normals(
[util.stack_3D(vertices[faces[0]])])[0]
normal_dot = np.dot(normal_test,
[0.0, 0.0, np.sign(height)])[0]
# make sure the triangulation is aligned with the sign of
# the height we've been passed
if normal_dot < 0.0:
faces = np.fliplr(faces)
# stack the (n,3) faces into (3*n, 2) edges
edges = faces_to_edges(faces)
edges_sorted = np.sort(edges, axis=1)
# edges which only occur once are on the boundary of the polygon
# since the triangulation may have subdivided the boundary of the
# shapely polygon, we need to find it again
edges_unique = grouping.group_rows(
edges_sorted, require_count=1)
# (n, 2, 2) set of line segments (positions, not references)
boundary = vertices[edges[edges_unique]]
# we are creating two vertical triangles for every 2D line segment
# on the boundary of the 2D triangulation
vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
vertical = np.column_stack((vertical,
np.tile([0, height, 0, height],
len(boundary))))
vertical_faces = np.tile([3, 1, 2, 2, 1, 0],
(len(boundary), 1))
vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
vertical_faces = vertical_faces.reshape((-1, 3))
# stack the (n,2) vertices with zeros to make them (n, 3)
vertices_3D = util.stack_3D(vertices)
# a sequence of zero- indexed faces, which will then be appended
# with offsets to create the final mesh
faces_seq = [faces[:, ::-1],
faces.copy(),
vertical_faces]
vertices_seq = [vertices_3D,
vertices_3D.copy() + [0.0, 0, height],
vertical]
# append sequences into flat nicely indexed arrays
vertices, faces = util.append_faces(vertices_seq, faces_seq)
if transform is not None:
# apply transform here to avoid later bookkeeping
vertices = tf.transform_points(
vertices, transform)
# if the transform flips the winding flip faces back
# so that the normals will be facing outwards
if tf.flips_winding(transform):
# fliplr makes arrays non-contiguous
faces = np.ascontiguousarray(np.fliplr(faces))
# create mesh object with passed keywords
mesh = Trimesh(vertices=vertices,
faces=faces,
**kwargs)
# only check in strict mode (unit tests)
if tol.strict:
assert mesh.volume > 0.0
return mesh
def triangulate_polygon(polygon,
triangle_args=None,
engine=None,
**kwargs):
"""
Given a shapely polygon create a triangulation using a
python interface to `triangle.c` or mapbox-earcut.
> pip install triangle
> pip install mapbox_earcut
Parameters
---------
polygon : Shapely.geometry.Polygon
Polygon object to be triangulated
triangle_args : str or None
Passed to triangle.triangulate i.e: 'p', 'pq30'
engine : None or str
Any value other than 'earcut' will use `triangle`
Returns
--------------
vertices : (n, 2) float
Points in space
faces : (n, 3) int
Index of vertices that make up triangles
"""
if engine == 'earcut':
from mapbox_earcut import triangulate_float64
# get vertices as sequence where exterior is the first value
vertices = [np.array(polygon.exterior)]
vertices.extend(np.array(i) for i in polygon.interiors)
# record the index from the length of each vertex array
rings = np.cumsum([len(v) for v in vertices])
# stack vertices into (n, 2) float array
vertices = np.vstack(vertices)
# run triangulation
faces = triangulate_float64(vertices, rings).reshape(
(-1, 3)).astype(np.int64).reshape((-1, 3))
return vertices, faces
# do the import here for soft requirement
from triangle import triangulate
# set default triangulation arguments if not specified
if triangle_args is None:
triangle_args = 'p'
# turn the polygon in to vertices, segments, and hole points
arg = _polygon_to_kwargs(polygon)
# run the triangulation
result = triangulate(arg, triangle_args)
return result['vertices'], result['triangles']
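# Usage sketch (assuming shapely and one of the triangulation engines is
# installed):
#   poly = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
#   v, f = triangulate_polygon(poly, engine='earcut')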
def _polygon_to_kwargs(polygon):
"""
Given a shapely polygon generate the data to pass to
the triangle mesh generator
Parameters
---------
polygon : Shapely.geometry.Polygon
Input geometry
Returns
--------
result : dict
Has keys: vertices, segments, holes
"""
if not polygon.is_valid:
raise ValueError('invalid shapely polygon passed!')
def round_trip(start, length):
"""
Given a start index and length, create a series of (n, 2) edges which
create a closed traversal.
Examples
---------
start, length = 0, 3
returns: [(0,1), (1,2), (2,0)]
"""
tiled = np.tile(np.arange(start, start + length).reshape((-1, 1)), 2)
tiled = tiled.reshape(-1)[1:-1].reshape((-1, 2))
tiled = np.vstack((tiled, [tiled[-1][-1], tiled[0][0]]))
return tiled
def add_boundary(boundary, start):
# coords is an (n, 2) ordered list of points on the polygon boundary
# the first and last points are the same, and there are no
# guarantees on points not being duplicated (which will
# later cause meshpy/triangle to shit a brick)
coords = np.array(boundary.coords)
# find indices points which occur only once, and sort them
# to maintain order
unique = np.sort(grouping.unique_rows(coords)[0])
cleaned = coords[unique]
vertices.append(cleaned)
facets.append(round_trip(start, len(cleaned)))
# holes require points inside the region of the hole, which we find
# by creating a polygon from the cleaned boundary region, and then
# using a representative point. You could do things like take the mean of
# the points, but this is more robust (to things like concavity), if
# slower.
test = Polygon(cleaned)
holes.append(np.array(test.representative_point().coords)[0])
return len(cleaned)
# sequence of (n,2) points in space
vertices = collections.deque()
# sequence of (n,2) indices of vertices
facets = collections.deque()
# list of (2) vertices in interior of hole regions
holes = collections.deque()
start = add_boundary(polygon.exterior, 0)
for interior in polygon.interiors:
try:
start += add_boundary(interior, start)
except BaseException:
log.warning('invalid interior, continuing')
continue
# create clean (n,2) float array of vertices
# and (m, 2) int array of facets
# by stacking the sequence of (p,2) arrays
vertices = np.vstack(vertices)
facets = np.vstack(facets).tolist()
# shapely polygons can include a Z component
# strip it out for the triangulation
if vertices.shape[1] == 3:
vertices = vertices[:, :2]
result = {'vertices': vertices,
'segments': facets}
# holes in meshpy lingo are a (h, 2) list of (x,y) points
# which are inside the region of the hole
# we added a hole for the exterior, which we slice away here
holes = np.array(holes)[1:]
if len(holes) > 0:
result['holes'] = holes
return result
def box(extents=None, transform=None, **kwargs):
"""
Return a cuboid.
Parameters
------------
extents : float, or (3,) float
Edge lengths
transform: (4, 4) float
Transformation matrix
**kwargs:
passed to Trimesh to create box
Returns
------------
geometry : trimesh.Trimesh
Mesh of a cuboid
"""
# vertices of the cube
vertices = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1,
1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
order='C',
dtype=np.float64).reshape((-1, 3))
vertices -= 0.5
# resize cube based on passed extents
if extents is not None:
extents = np.asanyarray(extents, dtype=np.float64)
if extents.shape != (3,):
raise ValueError('Extents must be (3,)!')
vertices *= extents
else:
extents = np.asarray((1.0, 1.0, 1.0), dtype=np.float64)
# hardcoded face indices
faces = [1, 3, 0, 4, 1, 0, 0, 3, 2, 2, 4, 0, 1, 7, 3, 5, 1, 4,
5, 7, 1, 3, 7, 2, 6, 4, 2, 2, 7, 6, 6, 5, 4, 7, 5, 6]
faces = np.array(faces, order='C', dtype=np.int64).reshape((-1, 3))
face_normals = [-1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 0, 0, 1, 0, -1,
0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 1, 0, 0]
face_normals = np.asanyarray(face_normals,
order='C',
dtype=np.float64).reshape(-1, 3)
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'box',
'extents': extents})
box = Trimesh(vertices=vertices,
faces=faces,
face_normals=face_normals,
process=False,
**kwargs)
# do the transform here to preserve face normals
if transform is not None:
box.apply_transform(transform)
return box
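# Usage sketch: box(extents=[1, 2, 3]) is a watertight cuboid centered at the
# origin with volume 1 * 2 * 3 == 6.0.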
def icosahedron():
"""
Create an icosahedron, a 20 faced polyhedron.
Returns
-------------
ico : trimesh.Trimesh
Icosahedron centered at the origin.
"""
t = (1.0 + 5.0**.5) / 2.0
vertices = [-1, t, 0, 1, t, 0, -1, -t, 0, 1, -t, 0, 0, -1, t, 0, 1, t,
0, -1, -t, 0, 1, -t, t, 0, -1, t, 0, 1, -t, 0, -1, -t, 0, 1]
faces = [0, 11, 5, 0, 5, 1, 0, 1, 7, 0, 7, 10, 0, 10, 11,
1, 5, 9, 5, 11, 4, 11, 10, 2, 10, 7, 6, 7, 1, 8,
3, 9, 4, 3, 4, 2, 3, 2, 6, 3, 6, 8, 3, 8, 9,
4, 9, 5, 2, 4, 11, 6, 2, 10, 8, 6, 7, 9, 8, 1]
# scale vertices so each vertex radius is 1.0
vertices = np.reshape(vertices, (-1, 3)) / np.sqrt(2.0 + t)
faces = np.reshape(faces, (-1, 3))
mesh = Trimesh(vertices=vertices,
faces=faces,
process=False)
return mesh
def icosphere(subdivisions=3, radius=1.0, color=None):
"""
Create an icosphere centered at the origin.
Parameters
----------
subdivisions : int
How many times to subdivide the mesh.
Note that the number of faces will grow as function of
4 ** subdivisions, so you probably want to keep this under ~5
radius : float
Desired radius of sphere
color: (3,) float or uint8
Desired color of sphere
Returns
---------
ico : trimesh.Trimesh
Meshed sphere
"""
def refine_spherical():
vectors = ico.vertices
scalar = (vectors ** 2).sum(axis=1)**.5
unit = vectors / scalar.reshape((-1, 1))
offset = radius - scalar
ico.vertices += unit * offset.reshape((-1, 1))
ico = icosahedron()
ico._validate = False
for j in range(subdivisions):
ico = ico.subdivide()
refine_spherical()
ico._validate = True
if color is not None:
ico.visual.face_colors = color
ico.metadata.update({'shape': 'sphere',
'radius': radius})
return ico
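# Usage sketch: icosphere(subdivisions=2) has 20 * 4**2 == 320 faces, every
# vertex lying at distance `radius` from the origin.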
def uv_sphere(radius=1.0,
count=[32, 32],
theta=None,
phi=None):
"""
Create a UV sphere (latitude + longitude) centered at the
origin. Roughly one order of magnitude faster than an
icosphere but slightly uglier.
Parameters
----------
radius : float
Radius of sphere
count : (2,) int
Number of latitude and longitude lines
theta : (n,) float
Optional theta angles in radians
phi : (n,) float
Optional phi angles in radians
Returns
----------
mesh : trimesh.Trimesh
Mesh of UV sphere with specified parameters
"""
count = np.array(count, dtype=np.int64)
count += np.mod(count, 2)
count[1] *= 2
# generate vertices on a sphere using spherical coordinates
if theta is None:
theta = np.linspace(0, np.pi, count[0])
if phi is None:
phi = np.linspace(0, np.pi * 2, count[1])[:-1]
spherical = np.dstack((np.tile(phi, (len(theta), 1)).T,
np.tile(theta, (len(phi), 1)))).reshape((-1, 2))
vertices = util.spherical_to_vector(spherical) * radius
# generate faces by creating a bunch of pie wedges
c = len(theta)
# a quad face as two triangles
pairs = np.array([[c, 0, 1],
[c + 1, c, 1]])
# increment both triangles in each quad face by the same offset
incrementor = np.tile(np.arange(c - 1), (2, 1)).T.reshape((-1, 1))
# create the faces for a single pie wedge of the sphere
strip = np.tile(pairs, (c - 1, 1))
strip += incrementor
# the first and last faces will be degenerate since the first
# and last vertex are identical in the two rows
strip = strip[1:-1]
# tile pie wedges into a sphere
faces = np.vstack([strip + (i * c) for i in range(len(phi))])
# poles are repeated in every strip, so a mask to merge them
mask = np.arange(len(vertices))
# the top pole are all the same vertex
mask[0::c] = 0
# the bottom pole are all the same vertex
mask[c - 1::c] = c - 1
# faces masked to remove the duplicated pole vertices
# and mod to wrap to fill in the last pie wedge
faces = mask[np.mod(faces, len(vertices))]
# we save a lot of time by not processing again
# since we did some bookkeeping mesh is watertight
mesh = Trimesh(vertices=vertices, faces=faces, process=False,
metadata={'shape': 'sphere',
'radius': radius})
return mesh
def capsule(height=1.0,
radius=1.0,
count=[32, 32]):
"""
Create a mesh of a capsule, or a cylinder with hemispheric ends.
Parameters
----------
height : float
Center to center distance of two spheres
radius : float
Radius of the cylinder and hemispheres
count : (2,) int
Number of sections on latitude and longitude
Returns
----------
capsule : trimesh.Trimesh
Capsule geometry with:
- cylinder axis is along Z
- one hemisphere is centered at the origin
- other hemisphere is centered along the Z axis at height
"""
height = float(height)
radius = float(radius)
count = np.array(count, dtype=np.int64)
count += np.mod(count, 2)
# create a theta where there is a double band around the equator
# so that we can offset the top and bottom of a sphere to
# get a nicely meshed capsule
theta = np.linspace(0, np.pi, count[0])
center = np.clip(np.arctan(tol.merge / radius),
tol.merge, np.inf)
offset = np.array([-center, center]) + (np.pi / 2)
theta = np.insert(theta,
int(len(theta) / 2),
offset)
capsule = uv_sphere(radius=radius,
count=count,
theta=theta)
top = capsule.vertices[:, 2] > tol.zero
capsule.vertices[top] += [0, 0, height]
capsule.metadata.update({'shape': 'capsule',
'height': height,
'radius': radius})
return capsule
def cone(radius,
height,
sections=None,
transform=None,
**kwargs):
"""
Create a mesh of a cone along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int or None
How many pie wedges per revolution
transform : (4, 4) float or None
Transform to apply after creation
**kwargs : dict
Passed to Trimesh constructor
Returns
----------
cone: trimesh.Trimesh
Resulting mesh of a cone
"""
# create the 2D outline of a cone
linestring = [[0, 0],
[radius, 0],
[0, height]]
# revolve the profile to create a cone
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'cone',
'radius': radius,
'height': height})
cone = revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
return cone
def cylinder(radius,
height=None,
sections=None,
segment=None,
transform=None,
**kwargs):
"""
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float or None
The height of the cylinder
sections : int or None
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
"""
if segment is not None:
# override transform and height with the segment
transform, height = _segment_to_cylinder(segment=segment)
if height is None:
raise ValueError('either `height` or `segment` must be passed!')
half = abs(float(height)) / 2.0
# create a profile to revolve
linestring = [[0, -half],
[radius, -half],
[radius, half],
[0, half]]
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'cylinder',
'height': height,
'radius': radius})
# generate cylinder through simple revolution
return revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
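# Usage sketch: passing `segment` overrides both `transform` and `height`,
# producing a cylinder whose axis runs between the two endpoints:
#   >>> c = cylinder(radius=0.25, segment=[[0, 0, 0], [0, 0, 3.0]])
#   >>> float(c.metadata['height'])
#   3.0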
def annulus(r_min,
r_max,
height=None,
sections=None,
transform=None,
segment=None,
**kwargs):
"""
Create a mesh of an annular cylinder along Z centered at the origin.
Parameters
----------
r_min : float
The inner radius of the annular cylinder
r_max : float
The outer radius of the annular cylinder
height : float
The height of the annular cylinder
sections : int or None
How many pie wedges should the annular cylinder have
transform : (4, 4) float or None
Transform to apply to move result from the origin
segment : None or (2, 3) float
Override transform and height with a line segment
**kwargs:
passed to Trimesh to create annulus
Returns
----------
annulus : trimesh.Trimesh
Mesh of annular cylinder
"""
if segment is not None:
# override transform and height with the segment if passed
transform, height = _segment_to_cylinder(segment=segment)
if height is None:
raise ValueError('either `height` or `segment` must be passed!')
r_min = abs(float(r_min))
    # if the inner radius is effectively zero this is just a cylinder
if r_min < tol.merge:
return cylinder(radius=r_max,
height=height,
sections=sections,
transform=transform)
r_max = abs(float(r_max))
# we're going to center at XY plane so take half the height
half = abs(float(height)) / 2.0
# create counter-clockwise rectangle
linestring = [[r_min, -half],
[r_max, -half],
[r_max, half],
[r_min, half],
[r_min, -half]]
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'annulus',
'r_min': r_min,
'r_max': r_max,
'height': height})
# revolve the curve
annulus = revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
return annulus
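# Usage sketch: a washer-shaped mesh; an r_min below tol.merge falls back
# to the solid cylinder() path above:
#   >>> a = annulus(r_min=0.5, r_max=1.0, height=0.2)
#   >>> a.metadata['shape']
#   'annulus'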
def _segment_to_cylinder(segment):
"""
Convert a line segment to a transform and height for a cylinder
or cylinder-like primitive.
Parameters
-----------
segment : (2, 3) float
3D line segment in space
Returns
-----------
transform : (4, 4) float
Matrix to move a Z-extruded origin cylinder to segment
height : float
The height of the cylinder needed
"""
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError('segment must be 2 3D points!')
vector = segment[1] - segment[0]
# override height with segment length
height = np.linalg.norm(vector)
# point in middle of line
midpoint = segment[0] + (vector * 0.5)
# align Z with our desired direction
rotation = align_vectors([0, 0, 1], vector)
# translate to midpoint of segment
translation = tf.translation_matrix(midpoint)
# compound the rotation and translation
transform = np.dot(translation, rotation)
return transform, height
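# Usage sketch: the returned height is the segment length, and the transform
# maps a Z-aligned cylinder centered at the origin onto the segment:
#   >>> T, h = _segment_to_cylinder([[0, 0, 0], [0, 0, 2.0]])
#   >>> float(h)
#   2.0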
def random_soup(face_count=100):
"""
Return random triangles as a Trimesh
Parameters
-----------
face_count : int
Number of faces desired in mesh
Returns
-----------
soup : trimesh.Trimesh
Geometry with face_count random faces
"""
vertices = np.random.random((face_count * 3, 3)) - 0.5
faces = np.arange(face_count * 3).reshape((-1, 3))
soup = Trimesh(vertices=vertices, faces=faces)
return soup
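# Usage sketch: the triangles are mutually independent, so the result is
# essentially never watertight:
#   >>> soup = random_soup(face_count=10)
#   >>> soup.is_watertight
#   False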
def axis(origin_size=0.04,
transform=None,
origin_color=None,
axis_radius=None,
axis_length=None):
"""
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
    origin_color : (3,) or (4,) int or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
"""
# the size of the ball representing the origin
origin_size = float(origin_size)
# set the transform and use origin-relative
# sized for other parameters if not specified
if transform is None:
transform = np.eye(4)
if origin_color is None:
origin_color = [255, 255, 255, 255]
if axis_radius is None:
axis_radius = origin_size / 5.0
if axis_length is None:
axis_length = origin_size * 10.0
# generate a ball for the origin
axis_origin = uv_sphere(radius=origin_size,
count=[10, 10])
axis_origin.apply_transform(transform)
# apply color to the origin ball
axis_origin.visual.face_colors = origin_color
# create the cylinder for the z-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
z_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(translation))
# XYZ->RGB, Z is blue
z_axis.visual.face_colors = [0, 0, 255]
# create the cylinder for the y-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
rotation = tf.rotation_matrix(np.radians(-90),
[1, 0, 0])
y_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, Y is green
y_axis.visual.face_colors = [0, 255, 0]
# create the cylinder for the x-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
rotation = tf.rotation_matrix(np.radians(90),
[0, 1, 0])
x_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, X is red
x_axis.visual.face_colors = [255, 0, 0]
# append the sphere and three cylinders
marker = util.concatenate([axis_origin,
x_axis,
y_axis,
z_axis])
return marker
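# Usage sketch: cylinder radius and length default to origin_size / 5 and
# origin_size * 10 respectively unless given explicitly:
#   >>> marker = axis(origin_size=0.1, axis_length=1.5)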
def camera_marker(camera,
marker_height=0.4,
origin_size=None):
"""
Create a visual marker for a camera object, including an axis and FOV.
Parameters
---------------
camera : trimesh.scene.Camera
Camera object with FOV and transform defined
marker_height : float
How far along the camera Z should FOV indicators be
origin_size : float
Sphere radius of the origin (default: marker_height / 10.0)
Returns
------------
meshes : list
Contains Trimesh and Path3D objects which can be visualized
"""
# create sane origin size from marker height
if origin_size is None:
origin_size = marker_height / 10.0
# append the visualizations to an array
meshes = [axis(origin_size=origin_size)]
try:
# path is a soft dependency
from .path.exchange.load import load_path
except ImportError:
# they probably don't have shapely installed
log.warning('unable to create FOV visualization!',
exc_info=True)
return meshes
# calculate vertices from camera FOV angles
x = marker_height * np.tan(np.deg2rad(camera.fov[0]) / 2.0)
    y = marker_height * np.tan(np.deg2rad(camera.fov[1]) / 2.0)
def calculateAnyProfile(profileType, df_labs, df_meds, df_procedures, df_diagnoses, df_phenotypes):
"""Calculate a single profile based on the type provided and data cleaned from getSubdemographicsTables
Arguments:
profileType -- which individual profile type you would like generated, this will be the category with the header information
(Options: 'labs', 'medications', 'procedures', 'diagnoses', 'phenotypes')
Keywords:
df_labs -- labs dataframe returned from getSubdemographicsTables
    df_meds -- medications dataframe returned from getSubdemographicsTables
df_procedures -- procedures dataframe returned from getSubdemographicsTables
df_diagnoses -- diagnoses dataframe returned from getSubdemographicsTables
df_phenotypes -- phenotypes dataframe returned from getSubdemographicsTables
Returns Pythonic structures needed to generate profile in JSON format using the corresponding write profile function
"""
import os
import sys
import sqlalchemy
import urllib.parse
import pandas as pd
import numpy as np
import getpass
from dataclasses import dataclass
from SciServer import Authentication
from datetime import datetime
import pymssql
try:
# Make Labs Profile
if profileType == 'labs':
# High Level Info, Scalar Distribution
labs_counts = df_labs.LAB_LOINC.value_counts()
grouped_labs = df_labs.groupby(['LAB_LOINC', 'resultYear'])
labs_frequencyPerYear = (df_labs.groupby(['LAB_LOINC','PATID','resultYear']).PATID.size()
.groupby(['LAB_LOINC','resultYear']).aggregate(np.mean))
labs_fractionOfSubjects = (np.divide(df_labs.groupby(['LAB_LOINC']).PATID.nunique(),
df_labs.PATID.nunique()))
labs_units = df_labs.groupby(['LAB_LOINC']).LOINC_UNIT.unique()
labs_names = df_labs.groupby(['LAB_LOINC']).LOINC_SHORTNAME.unique()
def percentile(n):
def percentile_(x):
return x.quantile(n*0.01)
percentile_.__name__ = '%s' % n
return percentile_
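            # note: percentile(n) returns a closure whose __name__ is str(n),
            # so the aggregated columns below are labeled '10', '20', ...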
labs_stats = (grouped_labs
.RESULT_NUM.agg(['min','max', 'mean','median','std',
percentile(10), percentile(20), percentile(30),
percentile(40), percentile(50), percentile(60),
percentile(70), percentile(80), percentile(90)]))
def fracsAboveBelowNormal(x):
try:
aboveNorm = np.divide(np.sum(x.RESULT_NUM > x.range_high), x.RESULT_NUM.size)
belowNorm = np.divide(np.sum(x.RESULT_NUM < x.range_low), x.RESULT_NUM.size)
return pd.Series({'aboveNorm':aboveNorm, 'belowNorm':belowNorm})
                except Exception:
return pd.Series({'aboveNorm':np.nan, 'belowNorm':np.nan})
labs_aboveBelowNorm = (grouped_labs.apply(fracsAboveBelowNormal))
labs_correlatedLabsCoefficients = (df_labs.groupby(['LAB_LOINC','resultYear','PATID'])
.RESULT_NUM.mean())
labs_abscorrelation = 0
## LABS TO MEDICATIONS
def patientsAboveBelowNormalLabsMeds(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to meds table
abnormalPatientsMeds = df_meds[df_meds.PATID.isin(patientsAboveBelowNorm) &
(df_meds.startYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'medsAboveBelowNorm': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedMedsCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedMedsCoefficients.index:
thisLabYear = labs_correlatedMedsCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for medInd in range(len(labs_correlatedMedsCoefficients.loc[lab].medsAboveBelowNorm.values)):
mytups.append((thisLabYear.medsAboveBelowNorm.values[medInd], thisLabYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## LABS TO PROCEDURES
def patientsAboveBelowNormalLabsProcs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsAboveBelowNorm) &
(df_procedures.encounterYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'procsAboveBelowNorm': abnormalPatientsProcs.RAW_PX.value_counts().index,
'counts': abnormalPatientsProcs.RAW_PX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedProceduresCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedProceduresCoefficients.index:
thisLabYear = labs_correlatedProceduresCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for procInd in range(len(labs_correlatedProceduresCoefficients.loc[lab].procsAboveBelowNorm.values)):
mytups.append((thisLabYear.procsAboveBelowNorm.values[procInd], thisLabYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## LABS TO DIAGNOSES
def patientsAboveBelowNormalLabsDiags(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsDiags = df_diagnoses[df_diagnoses.PATID.isin(patientsAboveBelowNorm) &
(df_diagnoses.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsAboveBelowNorm': abnormalPatientsDiags.DX.value_counts().index,
'counts': abnormalPatientsDiags.DX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedDiagnosisCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedDiagnosisCoefficients.index:
thisLabYear = labs_correlatedDiagnosisCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for diagInd in range(len(labs_correlatedDiagnosisCoefficients.loc[lab].diagsAboveBelowNorm.values)):
mytups.append((thisLabYear.diagsAboveBelowNorm.values[diagInd], thisLabYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedDiagnosisCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
## LABS TO PHENOTYPES
def patientsAboveBelowNormalLabsHPOs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsHPOs = df_phenotypes[df_phenotypes.PATID.isin(patientsAboveBelowNorm) &
(df_phenotypes.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'hposAboveBelowNorm': abnormalPatientsHPOs.HPO.value_counts().index,
'counts': abnormalPatientsHPOs.HPO.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedPhenotypesCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedPhenotypesCoefficients.index:
thisLabYear = labs_correlatedPhenotypesCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for hpoInd in range(len(labs_correlatedPhenotypesCoefficients.loc[lab].hposAboveBelowNorm.values)):
mytups.append((thisLabYear.hposAboveBelowNorm.values[hpoInd], thisLabYear.counts[hpoInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (labs_counts, labs_frequencyPerYear, labs_fractionOfSubjects, labs_units, labs_names,
labs_stats, labs_aboveBelowNorm, labs_correlatedLabsCoefficients, labs_abscorrelation,
labs_correlatedMedsCoefficients, labs_correlatedProceduresCoefficients, labs_correlatedDiagnosisCoefficients,
labs_correlatedPhenotypesCoefficients)
# Make Medication Profile
elif profileType == 'medications':
meds_medication = df_meds.JH_INGREDIENT_RXNORM_CODE.unique()
meds_dosageInfo = df_meds.groupby('JH_INGREDIENT_RXNORM_CODE').RX_DOSE_ORDERED.mean()
meds_frequencyPerYear = (df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE','startYear','PATID']).PATID
.count().groupby(['JH_INGREDIENT_RXNORM_CODE','startYear']).mean())
meds_fractionOfSubjects = (np.divide(df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE']).PATID.nunique(),
df_meds.PATID.nunique()))
grouped_meds = df_meds.groupby(['JH_INGREDIENT_RXNORM_CODE', 'startYear'])
#meds_correlatedLabsCoefficients
def patientsAboveBelowNormalMedsLabs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisRX)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
meds_correlatedLabsCoefficients = (grouped_meds.apply(patientsAboveBelowNormalMedsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedLabsCoefficients.index:
thisMedYear = meds_correlatedLabsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for labInd in range(len(meds_correlatedLabsCoefficients.loc[med].labsAboveBelowNorm.values)):
mytups.append((thisMedYear.labsAboveBelowNorm.values[labInd], thisMedYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#meds_correlatedDiagsCoefficients
def patientsCrossFreqMedsDiags(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisRX)) &
(df_diagnoses.admitYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
meds_correlatedDiagsCoefficients = (grouped_meds.apply(patientsCrossFreqMedsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedDiagsCoefficients.index:
thisMedYear = meds_correlatedDiagsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for diagInd in range(len(meds_correlatedDiagsCoefficients.loc[med].diagsCrossFreq.values)):
mytups.append((thisMedYear.diagsCrossFreq.values[diagInd], thisMedYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#meds_correlatedMedsCoefficients
def patientsCrossFreqMedsMeds(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to labs table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisRX)) &
(pd.to_datetime(df_meds.RX_START_DATE).dt.year ==
pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
meds_correlatedMedsCoefficients = (grouped_meds.apply(patientsCrossFreqMedsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedMedsCoefficients.index:
thisMedYear = meds_correlatedMedsCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for medInd in range(len(meds_correlatedMedsCoefficients.loc[med].medsCrossFreq.values)):
mytups.append((thisMedYear.medsCrossFreq.values[medInd], thisMedYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## MEDS TO PROCEDURES
def patientsCrossFreqMedsProcs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisRX) &
(df_procedures.encounterYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
meds_correlatedProceduresCoefficients = (grouped_meds.apply(patientsCrossFreqMedsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedProceduresCoefficients.index:
thisMedYear = meds_correlatedProceduresCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for procInd in range(len(meds_correlatedProceduresCoefficients.loc[med].procsCrossFreq.values)):
mytups.append((thisMedYear.procsCrossFreq.values[procInd], thisMedYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## MEDS TO HPO
def patientsCrossFreqMedsHPOs(x):
patientsWithThisRX = list(set(x.PATID.tolist()))
# Link to hpo table
commonPatientsHPOs = df_phenotypes[(df_phenotypes.PATID.isin(patientsWithThisRX)) &
(df_phenotypes.admitYear == pd.to_datetime(x.RX_START_DATE).dt.year.unique()[0])]
return pd.Series({'hposCrossFreq': commonPatientsHPOs.HPO.value_counts().index,
'counts': commonPatientsHPOs.HPO.value_counts().values})
meds_correlatedPhenotypesCoefficients = (grouped_meds.apply(patientsCrossFreqMedsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for med in meds_correlatedPhenotypesCoefficients.index:
thisMedYear = meds_correlatedPhenotypesCoefficients.loc[med]
thisMed = med[0]
thisYear = med[1]
totalCrossTab = np.sum(thisMedYear.counts)
for phenoInd in range(len(meds_correlatedPhenotypesCoefficients.loc[med].hposCrossFreq.values)):
mytups.append((thisMedYear.hposCrossFreq.values[phenoInd], thisMedYear.counts[phenoInd]/totalCrossTab))
multiIndex.append((thisMed, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
meds_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (meds_medication, meds_dosageInfo, meds_frequencyPerYear, meds_fractionOfSubjects,
meds_correlatedLabsCoefficients, meds_correlatedDiagsCoefficients, meds_correlatedMedsCoefficients,
meds_correlatedProceduresCoefficients, meds_correlatedPhenotypesCoefficients)
# Make Procedures Profile
elif profileType == 'procedures':
procedures_code = df_procedures.RAW_PX.unique()
procedures_count = df_procedures.RAW_PX.value_counts()
procedures_frequencyPerYear = (df_procedures.groupby(['RAW_PX','encounterYear','PATID']).PATID.count()
.groupby(['RAW_PX','encounterYear']).mean())
procedures_fractionOfSubjects = (np.divide(df_procedures.groupby(['RAW_PX']).PATID.nunique(),
df_procedures.PATID.nunique()))
grouped_procs = df_procedures.groupby(['RAW_PX', 'encounterYear'])
#procs_correlatedLabsCoefficients
def patientsAboveBelowNormalProcsLabs(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisProc)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
procs_correlatedLabsCoefficients = (grouped_procs.apply(patientsAboveBelowNormalProcsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedLabsCoefficients.index:
thisProcYear = procs_correlatedLabsCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for labInd in range(len(procs_correlatedLabsCoefficients.loc[proc].labsAboveBelowNorm.values)):
mytups.append((thisProcYear.labsAboveBelowNorm.values[labInd], thisProcYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#procs_correlatedDiagsCoefficients
def patientsCrossFreqProcsDiags(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisProc)) &
(df_diagnoses.admitYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
procs_correlatedDiagsCoefficients = (grouped_procs.apply(patientsCrossFreqProcsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedDiagsCoefficients.index:
thisProcYear = procs_correlatedDiagsCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for diagInd in range(len(procs_correlatedDiagsCoefficients.loc[proc].diagsCrossFreq.values)):
mytups.append((thisProcYear.diagsCrossFreq.values[diagInd], thisProcYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#procs_correlatedMedsCoefficients
def patientsCrossFreqProcsMeds(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to labs table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisProc)) &
(df_meds.startYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
procs_correlatedMedsCoefficients = (grouped_procs.apply(patientsCrossFreqProcsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedMedsCoefficients.index:
thisProcYear = procs_correlatedMedsCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for medInd in range(len(procs_correlatedMedsCoefficients.loc[proc].medsCrossFreq.values)):
mytups.append((thisProcYear.medsCrossFreq.values[medInd], thisProcYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## PROCEDURES TO PROCEDURES
def patientsCrossFreqProcsProcs(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisProc) &
(df_procedures.encounterYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
procs_correlatedProceduresCoefficients = (grouped_procs.apply(patientsCrossFreqProcsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedProceduresCoefficients.index:
thisProcYear = procs_correlatedProceduresCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for procInd in range(len(procs_correlatedProceduresCoefficients.loc[proc].procsCrossFreq.values)):
mytups.append((thisProcYear.procsCrossFreq.values[procInd], thisProcYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
# procedures to hpo
def patientsCrossFreqProcsHPOs(x):
patientsWithThisProc = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsHPOs = df_phenotypes[(df_phenotypes.PATID.isin(patientsWithThisProc)) &
(df_phenotypes.admitYear == pd.to_datetime(x.PX_DATE).dt.year.unique()[0])]
return pd.Series({'hposCrossFreq': commonPatientsHPOs.HPO.value_counts().index,
'counts': commonPatientsHPOs.HPO.value_counts().values})
procs_correlatedPhenotypesCoefficients = (grouped_procs.apply(patientsCrossFreqProcsHPOs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for proc in procs_correlatedPhenotypesCoefficients.index:
thisProcYear = procs_correlatedPhenotypesCoefficients.loc[proc]
thisProc = proc[0]
thisYear = proc[1]
totalCrossTab = np.sum(thisProcYear.counts)
for phenoInd in range(len(procs_correlatedPhenotypesCoefficients.loc[proc].hposCrossFreq.values)):
mytups.append((thisProcYear.hposCrossFreq.values[phenoInd], thisProcYear.counts[phenoInd]/totalCrossTab))
multiIndex.append((thisProc, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
procs_correlatedPhenotypesCoefficients = (pd.DataFrame.from_records(mytups, columns=['HPO','Relative_Counts'],
index=index))
return (procedures_code, procedures_count, procedures_frequencyPerYear, procedures_fractionOfSubjects,
procs_correlatedLabsCoefficients, procs_correlatedDiagsCoefficients, procs_correlatedMedsCoefficients,
procs_correlatedProceduresCoefficients, procs_correlatedPhenotypesCoefficients)
# Make Diagnoses Profile
elif profileType == 'diagnoses':
diagnoses_code = df_diagnoses.DX.unique()
diagnoses_count = df_diagnoses.DX.value_counts()
diagnoses_frequencyPerYear = (df_diagnoses.groupby(['DX','admitYear','PATID']).PATID
.count().groupby(['DX','admitYear']).mean())
diagnoses_fractionOfSubjects = (np.divide(df_diagnoses.groupby(['DX']).PATID.nunique(),
df_diagnoses.PATID.nunique()))
grouped_diags = df_diagnoses.groupby(['DX','admitYear'])
#diags_correlatedLabsCoefficients
def patientsAboveBelowNormalDiagsLabs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to labs table
abnormalPatientsLabs = df_labs[(df_labs.PATID.isin(patientsWithThisDiag)) &
((df_labs.RESULT_NUM > df_labs.range_high) |
(df_labs.RESULT_NUM < df_labs.range_low)) &
(df_labs.resultYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'labsAboveBelowNorm': abnormalPatientsLabs.LAB_LOINC.value_counts().index,
'counts': abnormalPatientsLabs.LAB_LOINC.value_counts().values})
diags_correlatedLabsCoefficients = (grouped_diags.apply(patientsAboveBelowNormalDiagsLabs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedLabsCoefficients.index:
thisDiagYear = diags_correlatedLabsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for labInd in range(len(diags_correlatedLabsCoefficients.loc[diag].labsAboveBelowNorm.values)):
mytups.append((thisDiagYear.labsAboveBelowNorm.values[labInd], thisDiagYear.counts[labInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedLabsCoefficients = (pd.DataFrame.from_records(mytups, columns=['LAB_LOINC','Relative_Counts'],
index=index))
#diags_correlatedDiagsCoefficients
def patientsCrossFreqDiagsDiags(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to diagnoses table
commonPatientsDXs = df_diagnoses[(df_diagnoses.PATID.isin(patientsWithThisDiag)) &
(df_diagnoses.admitYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsCrossFreq': commonPatientsDXs.DX.value_counts().index,
'counts': commonPatientsDXs.DX.value_counts().values})
diags_correlatedDiagsCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedDiagsCoefficients.index:
thisDiagYear = diags_correlatedDiagsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for diagInd in range(len(diags_correlatedDiagsCoefficients.loc[diag].diagsCrossFreq.values)):
mytups.append((thisDiagYear.diagsCrossFreq.values[diagInd], thisDiagYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedDiagsCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
#diags_correlatedMedsCoefficients
def patientsCrossFreqDiagsMeds(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to labs table
commonPatientsMeds = df_meds[(df_meds.PATID.isin(patientsWithThisDiag)) &
(df_meds.startYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'medsCrossFreq': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': commonPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
diags_correlatedMedsCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedMedsCoefficients.index:
thisDiagYear = diags_correlatedMedsCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = np.sum(thisDiagYear.counts)
for medInd in range(len(diags_correlatedMedsCoefficients.loc[diag].medsCrossFreq.values)):
mytups.append((thisDiagYear.medsCrossFreq.values[medInd], thisDiagYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisDiag, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
diags_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
## DIAGNOSES TO PROCEDURES
def patientsCrossFreqDiagsProcs(x):
patientsWithThisDiag = list(set(x.PATID.tolist()))
# Link to procs table
commonPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsWithThisDiag) &
(df_procedures.encounterYear == pd.to_datetime(x.ADMIT_DATE).dt.year.unique()[0])]
return pd.Series({'procsCrossFreq': commonPatientsProcs.RAW_PX.value_counts().index,
'counts': commonPatientsProcs.RAW_PX.value_counts().values})
diags_correlatedProceduresCoefficients = (grouped_diags.apply(patientsCrossFreqDiagsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for diag in diags_correlatedProceduresCoefficients.index:
thisDiagYear = diags_correlatedProceduresCoefficients.loc[diag]
thisDiag = diag[0]
thisYear = diag[1]
totalCrossTab = | np.sum(thisDiagYear.counts) | numpy.sum |
""" Ops for downsampling images.
Planned:
DownsampleFactorMax, DownsampleAvg, DownsampleSoftmax.
"""
#This file should move along with conv.py
import __builtin__
import numpy
import theano
from theano import gof, Op, tensor, Variable, Apply
def max_pool2D(*args, **kwargs):
import sys
print >> sys.stderr, "DEPRECATION: max_pool2D renamed to max_pool_2d"
return max_pool_2d(*args, **kwargs)
def max_pool_2d(input, ds, ignore_border=False, st=None):
"""
Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1])
:type input: N-D theano tensor of input images.
:param input: input images. Max pooling will be done over the 2 last
dimensions.
:type ds: tuple of length 2
:param ds: factor by which to downscale (vertical ds, horizontal ds).
(2,2) will halve the image in each dimension.
:type ignore_border: bool
:param ignore_border: When True, (5,5) input with ds=(2,2)
will generate a (2,2) output. (3,3) otherwise.
    :type st: tuple of length 2
:param st: stride size, which is the number of shifts
        over rows/cols to get the next pool region.
if st is None, it is considered equal to ds
(no overlap on pooling regions)
"""
if input.ndim < 2:
raise NotImplementedError('max_pool_2d requires a dimension >= 2')
if input.ndim == 4:
op = DownsampleFactorMax(ds, ignore_border, st=st)
output = op(input)
return output
# extract image dimensions
img_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size, 1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1]),
img_shape), 'int64')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of images
op = DownsampleFactorMax(ds, ignore_border, st=st)
output = op(input_4D)
# restore to original shape
outshp = tensor.join(0, input.shape[:-2], output.shape[-2:])
return tensor.reshape(output, outshp, ndim=input.ndim)
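# Usage sketch (illustrative; assumes a working Theano installation):
#   x = tensor.tensor4('x')
#   pooled = max_pool_2d(x, ds=(2, 2), ignore_border=True)
#   f = theano.function([x], pooled)
#   # a (1, 1, 4, 4) input then yields a (1, 1, 2, 2) output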
class DownsampleFactorMax(Op):
"""For N-dimensional tensors, consider that the last two
dimensions span images. This Op downsamples these images by a
factor ds, by taking the max over non-overlapping rectangular
regions.
"""
__props__ = ('ds', 'ignore_border', 'st')
@staticmethod
def out_shape(imgshape, ds, ignore_border=False, st=None):
"""Return the shape of the output from this op, for input of given
shape and flags.
:param imgshape: the shape of a tensor of images. The last two elements
are interpreted as the number of rows, and the number of cols.
:type imgshape: tuple, list, or similar of integer or
scalar Theano variable.
:param ds: downsample factor over rows and columns
this parameter indicates the size of the pooling region
:type ds: list or tuple of two ints
    :param st: the stride size. This is the distance between the pooling
        regions. If it is set to None, it equals ds.
:type st: list or tuple of two ints
:param ignore_border: if ds doesn't divide imgshape, do we include an
extra row/col of partial downsampling (False) or ignore it (True).
:type ignore_border: bool
:rtype: list
:returns: the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but with last
two elements reduced as per the downsampling & ignore_border flags.
"""
if len(imgshape) < 2:
raise TypeError('imgshape must have at least two elements '
'(rows, cols)')
if st is None:
st = ds
r, c = imgshape[-2:]
if ignore_border:
out_r = (r - ds[0]) // st[0] + 1
out_c = (c - ds[1]) // st[1] + 1
if isinstance(r, theano.Variable):
nr = tensor.maximum(out_r, 0)
else:
nr = numpy.maximum(out_r, 0)
if isinstance(c, theano.Variable):
nc = tensor.maximum(out_c, 0)
else:
nc = numpy.maximum(out_c, 0)
else:
if isinstance(r, theano.Variable):
nr = tensor.switch(tensor.ge(st[0], ds[0]),
(r - 1) // st[0] + 1,
tensor.maximum(0, (r - 1 - ds[0])
// st[0] + 1) + 1)
elif st[0] >= ds[0]:
nr = (r - 1) // st[0] + 1
else:
nr = max(0, (r - 1 - ds[0]) // st[0] + 1) + 1
if isinstance(c, theano.Variable):
nc = tensor.switch(tensor.ge(st[1], ds[1]),
(c - 1) // st[1] + 1,
tensor.maximum(0, (c - 1 - ds[1])
// st[1] + 1) + 1)
elif st[1] >= ds[1]:
nc = (c - 1) // st[1] + 1
else:
nc = max(0, (c - 1 - ds[1]) // st[1] + 1) + 1
rval = list(imgshape[:-2]) + [nr, nc]
return rval
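    # Worked example: for imgshape=(16, 3, 32, 32), ds=(2, 2), st=None and
    # ignore_border=True, out_shape returns [16, 3, 16, 16]; with
    # ignore_border=False a (5, 5) image keeps the partial edge row/col,
    # giving a (3, 3) output.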
def __init__(self, ds, ignore_border=False, st=None):
"""
:param ds: downsample factor over rows and column.
ds indicates the pool region size.
:type ds: list or tuple of two ints
:param ignore_border: if ds doesn't divide imgshape, do we include
an extra row/col of partial downsampling (False) or
ignore it (True).
:type ignore_border: bool
        :param st: stride size, which is the number of shifts
            over rows/cols to get the next pool region.
if st is None, it is considered equal to ds
(no overlap on pooling regions)
        :type st: list or tuple of two ints
"""
self.ds = tuple(ds)
if not all([isinstance(d, int) for d in ds]):
raise ValueError(
"DownsampleFactorMax downsample parameters must be ints."
" Got %s" % str(ds))
if st is None:
st = ds
self.st = tuple(st)
self.ignore_border = ignore_border
def __str__(self):
return '%s{%s,%s,%s}' % (self.__class__.__name__,
self.ds, self.st, self.ignore_border)
def make_node(self, x):
if x.type.ndim != 4:
raise TypeError()
        # TODO: consider restricting the dtype?
return gof.Apply(self, [x], [x.type()])
def perform(self, node, inp, out):
"""
"""
x, = inp
z, = out
if len(x.shape) != 4:
raise NotImplementedError(
'DownsampleFactorMax requires 4D input for now')
z_shape = self.out_shape(x.shape, self.ds, self.ignore_border, self.st)
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = numpy.empty(self.out_shape(x.shape, self.ds,
self.ignore_border, self.st),
dtype=x.dtype)
zz = z[0]
#number of pooling output rows
pr = zz.shape[-2]
#number of pooling output cols
pc = zz.shape[-1]
ds0, ds1 = self.ds
st0, st1 = self.st
img_rows = x.shape[-2]
img_cols = x.shape[-1]
for n in xrange(x.shape[0]):
for k in xrange(x.shape[1]):
for r in xrange(pr):
row_st = r * st0
row_end = __builtin__.min(row_st + ds0, img_rows)
for c in xrange(pc):
col_st = c * st1
col_end = __builtin__.min(col_st + ds1, img_cols)
zz[n, k, r, c] = x[
n, k, row_st:row_end, col_st:col_end].max()
def infer_shape(self, node, in_shapes):
shp = self.out_shape(in_shapes[0], self.ds,
self.ignore_border, self.st)
return [shp]
def grad(self, inp, grads):
x, = inp
gz, = grads
maxout = self(x)
return [DownsampleFactorMaxGrad(self.ds,
ignore_border=self.ignore_border,
st=self.st)(
x, maxout, gz)]
def c_code(self, node, name, inp, out, sub):
# No implementation is currently for the case where
# the stride size and the pooling size are different.
# An exception is raised for such a case.
if self.ds != self.st:
raise theano.gof.utils.MethodNotDefined()
x, = inp
z, = out
fail = sub['fail']
ignore_border = int(self.ignore_border)
ds0, ds1 = self.ds
return """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int x_shp0_usable;
int x_shp1_usable;
int z_shp0, z_shp1;
if(PyArray_NDIM(%(x)s)!=4)
{
PyErr_SetString(PyExc_ValueError, "x must be a 4d ndarray");
%(fail)s;
}
z_shp0 = PyArray_DIMS(%(x)s)[2] / %(ds0)s;
z_shp1 = PyArray_DIMS(%(x)s)[3] / %(ds1)s;
if (%(ignore_border)s)
{
x_shp0_usable = z_shp0 * %(ds0)s;
x_shp1_usable = z_shp1 * %(ds1)s;
}
else
{
z_shp0 += (PyArray_DIMS(%(x)s)[2] %% %(ds0)s) ? 1 : 0;
z_shp1 += (PyArray_DIMS(%(x)s)[3] %% %(ds1)s) ? 1 : 0;
x_shp0_usable = PyArray_DIMS(%(x)s)[2];
x_shp1_usable = PyArray_DIMS(%(x)s)[3];
}
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(x)s)[0])
||(PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(x)s)[1])
||(PyArray_DIMS(%(z)s)[2] != z_shp0)
||(PyArray_DIMS(%(z)s)[3] != z_shp1)
)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[4] = {0,0,0,0};
dims[0]=PyArray_DIMS(%(x)s)[0];
dims[1]=PyArray_DIMS(%(x)s)[1];
dims[2]=z_shp0;
dims[3]=z_shp1;
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}
if (z_shp0 && z_shp1)
{
for(int b=0;b<PyArray_DIMS(%(x)s)[0];b++){
for(int k=0;k<PyArray_DIMS(%(x)s)[1];k++){
int mini_i = 0;
int zi = 0;
for(int i=0;i< x_shp0_usable; i++){
int mini_j = 0;
int zj = 0;
for(int j=0; j<x_shp1_usable; j++){
dtype_%(x)s a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,b,k,i,j)))[0];
dtype_%(z)s * __restrict__ z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,b,k,zi,zj)));
z[0] = (((mini_j|mini_i) == 0) || z[0] < a) ? a : z[0];
mini_j = ((mini_j + 1) == %(ds1)s) ? 0 : mini_j+1;
zj += (mini_j == 0);
}
mini_i = ((mini_i + 1) == %(ds0)s) ? 0 : mini_i+1;
zi += (mini_i == 0);
}
}
}
}
""" % locals()
def c_code_cache_version(self):
return (0, 1)
class DownsampleFactorMaxGrad(Op):
__props__ = ('ds', 'ignore_border', 'st')
def __init__(self, ds, ignore_border, st=None):
self.ds = tuple(ds)
self.ignore_border = ignore_border
if st is None:
st = ds
self.st = tuple(st)
def __str__(self):
return '%s{%s,%s,%s}' % (self.__class__.__name__,
self.ds, self.st, self.ignore_border)
def make_node(self, x, maxout, gz):
# make_node should only be called by the grad function of
# DownsampleFactorMax, so these asserts should not fail.
assert isinstance(x, Variable) and x.ndim == 4
assert isinstance(maxout, Variable) and maxout.ndim == 4
assert isinstance(gz, Variable) and gz.ndim == 4
return Apply(self, [x, maxout, gz], [x.type()])
def perform(self, node, inp, out):
x, maxout, gz = inp
gx_stg, = out
gx = | numpy.zeros_like(x) | numpy.zeros_like |
import numpy as np
import matplotlib.pyplot as plt
from parameters import *
class Stimulus:
def __init__(self):
# generate tuning functions
self.motion_tuning, self.fix_tuning, self.rule_tuning = self.create_tuning_functions()
def generate_trial(self, test_mode = False, set_rule = None):
if par['trial_type'] in ['DMS','DMRS45','DMRS90','DMRS90ccw','DMRS180','DMC',\
'DMS+DMRS','DMS+DMRS_early_cue', 'DMS+DMRS_full_cue', 'DMS+DMC','DMS+DMRS+DMC','location_DMS']:
trial_info = self.generate_basic_trial(test_mode, set_rule)
elif par['trial_type'] in ['ABBA','ABCA']:
trial_info = self.generate_ABBA_trial(test_mode)
elif par['trial_type'] == 'dualDMS':
trial_info = self.generate_dualDMS_trial(test_mode)
elif par['trial_type'] == 'distractor':
trial_info = self.generate_distractor_trial()
# input activity needs to be non-negative
trial_info['neural_input'] = np.maximum(0., trial_info['neural_input'])
return trial_info
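    # Usage sketch (illustrative; assumes parameters.py supplies par with the
    # keys referenced below, e.g. par['trial_type'] = 'DMS'):
    #   stim = Stimulus()
    #   trial = stim.generate_trial()
    #   # for the basic DMS-style tasks trial['neural_input'] has shape
    #   # (num_time_steps, batch_size, n_input)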
def generate_dualDMS_trial(self, test_mode):
"""
Generate a trial based on "Reactivation of latent working memories with transcranial magnetic stimulation"
Trial outline
1. Dead period
2. Fixation
3. Two sample stimuli presented
4. Delay (cue in middle, and possibly probe later)
5. Test stimulus (to cued modality, match or non-match)
6. Delay (cue in middle, and possibly probe later)
7. Test stimulus
INPUTS:
        1. sample_time (duration of sample stimulus)
2. test_time
3. delay_time
4. cue_time (duration of rule cue, always presented halfway during delay)
        5. probe_time (usually set to one time step, always presented 3/4 through delay)
"""
test_time_rng = []
mask_time_rng = []
for n in range(2):
test_time_rng.append(range((par['dead_time']+par['fix_time']+par['sample_time']+(n+1)*par['delay_time']+n*par['test_time'])//par['dt'], \
(par['dead_time']+par['fix_time']+par['sample_time']+(n+1)*par['delay_time']+(n+1)*par['test_time'])//par['dt']))
mask_time_rng.append(range((par['dead_time']+par['fix_time']+par['sample_time']+(n+1)*par['delay_time']+n*par['test_time'])//par['dt'], \
(par['dead_time']+par['fix_time']+par['sample_time']+(n+1)*par['delay_time']+n*par['test_time']+par['mask_duration'])//par['dt']))
fix_time_rng = []
fix_time_rng.append(range(par['dead_time']//par['dt'], (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']))
fix_time_rng.append(range((par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time']+par['test_time'])//par['dt'], \
(par['dead_time']+par['fix_time']+par['sample_time']+2*par['delay_time']+par['test_time'])//par['dt']))
# duration of mask after test onset
mask_duration = par['mask_duration']//par['dt']
trial_info = {'desired_output' : np.zeros((par['num_time_steps'], par['batch_size'], par['n_output']),dtype=np.float32),
'train_mask' : np.ones((par['num_time_steps'], par['batch_size']),dtype=np.float32),
'sample' : np.zeros((par['batch_size'],2),dtype=np.int8),
'test' : np.zeros((par['batch_size'],2,2),dtype=np.int8),
'test_mod' : np.zeros((par['batch_size'],2),dtype=np.int8),
'rule' : np.zeros((par['batch_size'],2),dtype=np.int8),
'match' : np.zeros((par['batch_size'],2),dtype=np.int8),
'catch' : np.zeros((par['batch_size'],2),dtype=np.int8),
'probe' : np.zeros((par['batch_size'],2),dtype=np.int8),
'neural_input' : np.random.normal(par['input_mean'], par['noise_in'], size=(par['num_time_steps'], par['batch_size'], par['n_input']))}
for t in range(par['batch_size']):
            # generate sample, match, rule and probe params
for i in range(2):
trial_info['sample'][t,i] = np.random.randint(par['num_motion_dirs'])
trial_info['match'][t,i] = np.random.randint(2)
trial_info['rule'][t,i] = np.random.randint(2)
trial_info['catch'][t,i] = np.random.rand() < par['catch_trial_pct']
if i == 1:
# only generate a pulse during 2nd delay epoch
trial_info['probe'][t,i] = np.random.rand() < par['probe_trial_pct']
            # determine test stimulus based on sample and match status
for i in range(2):
if test_mode:
trial_info['test'][t,i,0] = np.random.randint(par['num_motion_dirs'])
trial_info['test'][t,i,1] = np.random.randint(par['num_motion_dirs'])
else:
# if trial is not a catch, the upcoming test modality (what the network should be attending to)
# is given by the rule cue
if not trial_info['catch'][t,i]:
trial_info['test_mod'][t,i] = trial_info['rule'][t,i]
else:
trial_info['test_mod'][t,i] = (trial_info['rule'][t,i]+1)%2
# cued test stimulus
if trial_info['match'][t,i] == 1:
trial_info['test'][t,i,0] = trial_info['sample'][t,trial_info['test_mod'][t,i]]
else:
sample = trial_info['sample'][t,trial_info['test_mod'][t,i]]
bad_directions = [sample]
possible_stim = np.setdiff1d(list(range(par['num_motion_dirs'])), bad_directions)
trial_info['test'][t,i,0] = possible_stim[np.random.randint(len(possible_stim))]
# non-cued test stimulus
trial_info['test'][t,i,1] = np.random.randint(par['num_motion_dirs'])
"""
Calculate input neural activity based on trial params
"""
# SAMPLE stimuli
trial_info['neural_input'][par['sample_time_rng'], t, :] += np.reshape(self.motion_tuning[:,0,trial_info['sample'][t,0]],(1,-1))
trial_info['neural_input'][par['sample_time_rng'], t, :] += np.reshape(self.motion_tuning[:,1,trial_info['sample'][t,1]],(1,-1))
# Cued TEST stimuli
trial_info['neural_input'][test_time_rng[0], t, :] += np.reshape(self.motion_tuning[:,trial_info['test_mod'][t,0],trial_info['test'][t,0,0]],(1,-1))
trial_info['neural_input'][test_time_rng[1], t, :] += np.reshape(self.motion_tuning[:,trial_info['test_mod'][t,1],trial_info['test'][t,1,0]],(1,-1))
# Non-cued TEST stimuli
trial_info['neural_input'][test_time_rng[0], t, :] += np.reshape(self.motion_tuning[:,(1+trial_info['test_mod'][t,0])%2,trial_info['test'][t,0,1]],(1,-1))
trial_info['neural_input'][test_time_rng[1], t, :] += np.reshape(self.motion_tuning[:,(1+trial_info['test_mod'][t,1])%2,trial_info['test'][t,1,1]],(1,-1))
# FIXATION
trial_info['neural_input'][fix_time_rng[0], t, :] += np.reshape(self.fix_tuning[:,0],(1,-1))
trial_info['neural_input'][fix_time_rng[1], t, :] += np.reshape(self.fix_tuning[:,0],(1,-1))
# RULE CUE
trial_info['neural_input'][par['rule_time_rng'][0], t, :] += np.reshape(self.rule_tuning[:,trial_info['rule'][t,0]],(1,-1))
trial_info['neural_input'][par['rule_time_rng'][1], t, :] += np.reshape(self.rule_tuning[:,trial_info['rule'][t,1]],(1,-1))
# PROBE
# increase reponse of all stim tuned neurons by 10
"""
if trial_info['probe'][t,0]:
trial_info['neural_input'][:est,probe_time1,t] += 10
if trial_info['probe'][t,1]:
trial_info['neural_input'][:est,probe_time2,t] += 10
"""
"""
Desired outputs
"""
# FIXATION
trial_info['desired_output'][fix_time_rng[0], t, 0] = 1
trial_info['desired_output'][fix_time_rng[1], t, 0] = 1
# TEST 1
trial_info['train_mask'][ test_time_rng[0], t] *= par['test_cost_multiplier'] # can use a greater weight for test period if needed
if trial_info['match'][t,0] == 1:
trial_info['desired_output'][test_time_rng[0], t, 2] = 1
else:
trial_info['desired_output'][test_time_rng[0], t, 1] = 1
# TEST 2
trial_info['train_mask'][ test_time_rng[1], t] *= par['test_cost_multiplier'] # can use a greater weight for test period if needed
if trial_info['match'][t,1] == 1:
trial_info['desired_output'][test_time_rng[1], t, 2] = 1
else:
trial_info['desired_output'][test_time_rng[1], t, 1] = 1
            # set the mask equal to zero during the dead time and at the start of each test stimulus
trial_info['train_mask'][:par['dead_time']//par['dt'], t] = 0
trial_info['train_mask'][mask_time_rng[0], t] = 0
trial_info['train_mask'][mask_time_rng[1], t] = 0
return trial_info
def generate_distractor_trial(self):
# duration of mask after test onset
mask_duration = par['mask_duration']//par['dt']
num_time_steps = (par['dead_time']+par['fix_time']+par['sample_time']+par['distractor_time']+par['test_time']+2*par['delay_time'])//par['dt']
trial_info = {'desired_output' : np.zeros((par['n_output'], num_time_steps, par['batch_size']),dtype=np.float32),
'train_mask' : np.ones((num_time_steps, par['batch_size']),dtype=np.float32),
'sample' : np.zeros((par['batch_size']),dtype=np.int8),
'distractor' : np.zeros((par['batch_size']),dtype=np.int8),
'rule' : np.zeros((par['batch_size']),dtype=np.int8),
'match' : np.zeros((par['batch_size']),dtype=np.int8),
'test' : np.zeros((par['batch_size']),dtype=np.int8),
'neural_input' : np.random.normal(par['input_mean'], par['noise_in'], size=(par['n_input'], num_time_steps, par['batch_size']))}
        # set the mask equal to zero during the dead time
        # end of trial epochs
        distractor_time_rng = range((par['dead_time']+par['fix_time'] + par['sample_time'] + par['delay_time'] )//par['dt'],\
                            (par['dead_time']+par['fix_time']+par['sample_time'] + par['delay_time'] + par['distractor_time'])//par['dt'])
test_onset = (par['dead_time']+par['fix_time'] + par['distractor_time'] + par['sample_time'] + 2*par['delay_time'])//par['dt']
trial_info['train_mask'][:par['dead_time']//par['dt'], :] = 0
for t in range(par['batch_size']):
"""
Generate trial paramaters
"""
sample_dir = np.random.randint(par['num_motion_dirs'])
distractor_dir = np.random.randint(par['num_motion_dirs'])
trial_info['neural_input'][:, par['sample_time_rng'], t] += np.reshape(self.motion_tuning[:, 0, sample_dir],(-1,1))
            trial_info['neural_input'][:, distractor_time_rng, t] += np.reshape(self.motion_tuning[:, 0, distractor_dir],(-1,1))
trial_info['neural_input'][:, :test_onset, t] += np.reshape(self.fix_tuning[:, 0],(-1,1))
"""
Determine the desired network output response
"""
trial_info['desired_output'][0, :test_onset, t] = 1
trial_info['desired_output'][1+sample_dir, test_onset:, t] = 1
trial_info['train_mask'][test_onset:test_onset+mask_duration, t] = 0
trial_info['train_mask'][test_onset:, t] *= par['test_cost_multiplier'] # can use a greater weight for test period if needed
"""
Append trial info
"""
trial_info['sample'][t] = sample_dir
trial_info['distractor'][t] = distractor_dir
return trial_info
def generate_basic_trial(self, test_mode, set_rule = None):
"""
Generate a delayed matching task
Goal is to determine whether the sample stimulus, possibly manipulated by a rule, is
        identical to a test stimulus
Sample and test stimuli are separated by a delay
"""
# range of variable delay, in time steps
var_delay_max = par['variable_delay_max']//par['dt']
# duration of mask after test onset
mask_duration = par['mask_duration']//par['dt']
trial_info = {'desired_output' : np.zeros((par['num_time_steps'], par['batch_size'], par['n_output']),dtype=np.float32),
'train_mask' : np.ones((par['num_time_steps'], par['batch_size']),dtype=np.float32),
'sample' : np.zeros((par['batch_size']),dtype=np.int8),
'test' : np.zeros((par['batch_size']),dtype=np.int8),
'rule' : np.zeros((par['batch_size']),dtype=np.int8),
'match' : np.zeros((par['batch_size']),dtype=np.int8),
'catch' : np.zeros((par['batch_size']),dtype=np.int8),
'probe' : np.zeros((par['batch_size']),dtype=np.int8),
'neural_input' : np.random.normal(par['input_mean'], par['noise_in'], size=(par['num_time_steps'], par['batch_size'], par['n_input']))}
        # set the mask equal to zero during the dead time
trial_info['train_mask'][par['dead_time_rng'], :] = 0
for t in range(par['batch_size']):
"""
            Generate trial parameters
"""
sample_dir = np.random.randint(par['num_motion_dirs'])
test_RF = np.random.choice([1,2]) if par['trial_type'] == 'location_DMS' else 0
rule = np.random.randint(par['num_rules']) if set_rule is None else set_rule
if par['trial_type'] == 'DMC' or (par['trial_type'] == 'DMS+DMC' and rule == 1) or (par['trial_type'] == 'DMS+DMRS+DMC' and rule == 2):
# for DMS+DMC trial type, rule 0 will be DMS, and rule 1 will be DMC
current_trial_DMC = True
else:
current_trial_DMC = False
match = np.random.randint(2)
catch = np.random.rand() < par['catch_trial_pct']
"""
            Generate trial parameters, which can vary given the rule
"""
if par['num_rules'] == 1:
match_rotation = int(par['num_motion_dirs']*par['rotation_match']/360)
else:
match_rotation = int(par['num_motion_dirs']*par['rotation_match'][rule]/360)
"""
Determine the delay time for this trial
The total trial length is kept constant, so a shorter delay implies a longer test stimulus
"""
if par['var_delay']:
s = int(np.random.exponential(scale=par['variable_delay_max']/2))
                if s <= par['variable_delay_max']:
                    test_onset = (par['dead_time']+par['fix_time']+par['sample_time'] + s)//par['dt']
                else:
                    # delay draw exceeded the allowed maximum: treat as a catch
                    # trial and fall back to the fixed delay so test_onset is defined
                    catch = 1
                    test_onset = (par['dead_time']+par['fix_time']+par['sample_time'] + par['delay_time'])//par['dt']
else:
test_onset = (par['dead_time']+par['fix_time']+par['sample_time'] + par['delay_time'])//par['dt']
test_time_rng = range(test_onset, par['num_time_steps'])
fix_time_rng = range(test_onset)
trial_info['train_mask'][test_onset:test_onset+mask_duration, t] = 0
"""
Generate the sample and test stimuli based on the rule
"""
# DMC
if not test_mode:
if current_trial_DMC: # categorize between two equal size, contiguous zones
sample_cat = np.floor(sample_dir/(par['num_motion_dirs']/2))
if match == 1: # match trial
# do not use sample_dir as a match test stimulus
dir0 = int(sample_cat*par['num_motion_dirs']//2)
dir1 = int(par['num_motion_dirs']//2 + sample_cat*par['num_motion_dirs']//2)
possible_dirs = list(range(dir0, dir1))
test_dir = possible_dirs[np.random.randint(len(possible_dirs))]
else:
test_dir = sample_cat*(par['num_motion_dirs']//2) + np.random.randint(par['num_motion_dirs']//2)
test_dir = np.int_((test_dir+par['num_motion_dirs']//2)%par['num_motion_dirs'])
# DMS or DMRS
else:
matching_dir = (sample_dir + match_rotation)%par['num_motion_dirs']
if match == 1: # match trial
test_dir = matching_dir
else:
possible_dirs = np.setdiff1d(list(range(par['num_motion_dirs'])), matching_dir)
test_dir = possible_dirs[np.random.randint(len(possible_dirs))]
else:
test_dir = np.random.randint(par['num_motion_dirs'])
                # this next part only works for DMS, DMRS tasks
matching_dir = (sample_dir + match_rotation)%par['num_motion_dirs']
match = 1 if test_dir == matching_dir else 0
"""
Calculate neural input based on sample, tests, fixation, rule, and probe
"""
# SAMPLE stimulus
trial_info['neural_input'][par['sample_time_rng'], t, :] += np.reshape(self.motion_tuning[:, 0, sample_dir],(1,-1))
# TEST stimulus
if not catch:
trial_info['neural_input'][test_time_rng, t, :] += np.reshape(self.motion_tuning[:, test_RF, test_dir],(1,-1))
# FIXATION cue
if par['num_fix_tuned'] > 0:
trial_info['neural_input'][fix_time_rng, t] += np.reshape(self.fix_tuning[:,0],(-1,1))
# RULE CUE
if par['num_rules']> 1 and par['num_rule_tuned'] > 0:
trial_info['neural_input'][par['rule_time_rng'][0], t, :] += np.reshape(self.rule_tuning[:,rule],(1,-1))
"""
Determine the desired network output response
"""
trial_info['desired_output'][fix_time_rng, t, 0] = 1.
if not catch:
trial_info['train_mask'][ test_time_rng, t] *= par['test_cost_multiplier'] # can use a greater weight for test period if needed
if match == 0:
trial_info['desired_output'][test_time_rng, t, 1] = 1.
else:
trial_info['desired_output'][test_time_rng, t, 2] = 1.
else:
trial_info['desired_output'][test_time_rng, t, 0] = 1.
"""
Append trial info
"""
trial_info['sample'][t] = sample_dir
trial_info['test'][t] = test_dir
trial_info['rule'][t] = rule
trial_info['catch'][t] = catch
trial_info['match'][t] = match
return trial_info
def generate_ABBA_trial(self, test_mode):
"""
Generate ABBA trials
        Sample stimulus is followed by up to max_num_tests test stimuli
        Goal is to indicate when a test stimulus matches the sample
"""
# duration of mask after test onset
mask_duration = par['mask_duration']//par['dt']
# only one receptive field in this task
RF = 0
trial_length = par['num_time_steps']
ABBA_delay = par['ABBA_delay']//par['dt']
eos = (par['dead_time']+par['fix_time']+par['ABBA_delay'])//par['dt']
test_time_rng = []
mask_time_rng = []
for n in range(par['max_num_tests']):
test_time_rng.append(range(eos+ABBA_delay*(2*n+1), eos+ABBA_delay*(2*n+2)))
mask_time_rng.append(range(eos+ABBA_delay*(2*n+1), eos+ABBA_delay*(2*n+1) + mask_duration))
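        # timing (in steps): the sample ends at eos; test n is shown during
        # [eos + (2n+1)*ABBA_delay, eos + (2n+2)*ABBA_delay), so tests alternate with equal-length delays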
trial_info = {'desired_output' : np.zeros((par['num_time_steps'], par['batch_size'], par['n_output']),dtype=np.float32),
'train_mask' : np.ones((par['num_time_steps'], par['batch_size']),dtype=np.float32),
'sample' : np.zeros((par['batch_size']),dtype=np.float32),
'test' : -1*np.ones((par['batch_size'],par['max_num_tests']),dtype=np.float32),
'rule' : np.zeros((par['batch_size']),dtype=np.int8),
'match' : np.zeros((par['batch_size'],par['max_num_tests']),dtype=np.int8),
'catch' : np.zeros((par['batch_size']),dtype=np.int8),
'probe' : np.zeros((par['batch_size']),dtype=np.int8),
'num_test_stim' : np.zeros((par['batch_size']),dtype=np.int8),
'repeat_test_stim': np.zeros((par['batch_size']),dtype=np.int8),
'neural_input' : np.random.normal(par['input_mean'], par['noise_in'], size=(par['num_time_steps'], par['batch_size'], par['n_input']))}
        # set the mask to zero during the dead time
trial_info['train_mask'][par['dead_time_rng'], :] = 0
# set fixation equal to 1 for all times; will then change
trial_info['desired_output'][:, :, 0] = 1
for t in range(par['batch_size']):
# generate trial params
sample_dir = np.random.randint(par['num_motion_dirs'])
"""
Generate up to max_num_tests test stimuli
Sequential test stimuli are identical with probability repeat_pct
"""
stim_dirs = [sample_dir]
test_stim_code = 0
if test_mode:
# used to analyze how sample and test neuronal and synaptic tuning relate
# not used to evaluate task accuracy
while len(stim_dirs) <= par['max_num_tests']:
q = np.random.randint(par['num_motion_dirs'])
stim_dirs.append(q)
else:
while len(stim_dirs) <= par['max_num_tests']:
if np.random.rand() < par['match_test_prob']:
stim_dirs.append(sample_dir)
else:
                        if len(stim_dirs) > 1 and np.random.rand() < par['repeat_pct']:  # threshold reconstructed from the docstring above
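                            # hedged reconstruction (source truncated here): with probability
                            # repeat_pct, repeat the previous test stimulus
                            stim_dirs.append(stim_dirs[-1])
                            trial_info['repeat_test_stim'][t] = 1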
#!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
from collections import OrderedDict
import math
import numpy as np
import os
import subprocess
import pygion
from pygion import disjoint_complete, index_launch, print_once, task, Domain, Fspace, Future, Ispace, IndexLaunch, ID, Partition, N, R, Reduce, Region, RW, Trace, WD
root_dir = os.path.dirname(__file__)
circuit_header = subprocess.check_output(
[
"gcc", "-D", "__attribute__(x)=", "-E", "-P",
os.path.join(root_dir, "circuit_config.h")
]).decode("utf-8")
ffi = pygion.ffi
ffi.cdef(circuit_header)
Config = pygion.Type(
np.dtype([('bytes', np.void, ffi.sizeof('Config'))]),
'Config')
WIRE_SEGMENTS = 10
def parse_args(argv):
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-l', dest='num_loops', type=int, default=5)
parser.add_argument('-p', dest='num_pieces', type=int, default=4)
parser.add_argument('-pps', dest='pieces_per_superpiece', type=int, default=1)
parser.add_argument('-npp', dest='nodes_per_piece', type=int, default=4)
parser.add_argument('-wpp', dest='wires_per_piece', type=int, default=8)
parser.add_argument('-pct', dest='pct_wire_in_piece', type=int, default=80)
parser.add_argument('-s', dest='random_seed', type=int, default=12345)
parser.add_argument('-i', dest='steps', type=int, default=10000)
parser.add_argument('-sync', dest='sync', type=int, default=0)
parser.add_argument('-prune', dest='prune', type=int, default=0)
parser.add_argument('-checks', dest='perform_checks', action='store_true')
parser.add_argument('-dump', dest='dump_values', action='store_true')
parser.add_argument('-shared', dest='pct_shared_nodes', type=float, default=1.0)
parser.add_argument('-density', dest='density', type=int, default=20)
parser.add_argument('-neighbors', dest='num_neighbors', type=int, default=5)
parser.add_argument('-window', dest='window', type=int, default=3)
args = parser.parse_args(argv[1:])
conf = ffi.new('Config *')
for field, value in vars(args).items():
setattr(conf, field, value)
return conf
_constant_time_launches = True
if _constant_time_launches:
extern_task = pygion.extern_task
# extern_task = pygion.extern_task_wrapper
else:
extern_task = pygion.extern_task
init_piece = extern_task(
task_id=10002,
argument_types=[pygion.int32, Config, Region, Region, Region, Region, Region],
privileges=[None, None, WD, WD, WD, N, WD],
return_type=pygion.void,
calling_convention='regent')
init_pointers = extern_task(
task_id=10003,
argument_types=[Region, Region, Region, Region],
privileges=[N, N, N, RW('in_ptr', 'in_ptr_r', 'out_ptr', 'out_ptr_r')],
return_type=pygion.void,
calling_convention='regent')
calculate_new_currents = extern_task(
task_id=10004,
argument_types=[pygion.bool_, pygion.uint32, Region, Region, Region, Region, Region],
privileges=[
None,
None,
R('node_voltage'),
R('node_voltage'),
R('node_voltage'),
R('in_ptr', 'in_ptr_r', 'out_ptr', 'out_ptr_r', 'inductance', 'resistance', 'wire_cap') + RW(*['current_%d' % i for i in range(10)]) + RW(*['voltage_%d' % i for i in range(9)]),
RW],
return_type=pygion.void,
calling_convention='regent')
distribute_charge = extern_task(
task_id=10005,
argument_types=[Region, Region, Region, Region],
privileges=[
Reduce('+', 'charge'),
Reduce('+', 'charge'),
Reduce('+', 'charge'),
R('in_ptr', 'in_ptr_r', 'out_ptr', 'out_ptr_r', 'current_0', 'current_9')],
return_type=pygion.void,
calling_convention='regent')
update_voltages = extern_task(
task_id=10006,
argument_types=[pygion.bool_, Region, Region, Region],
privileges=[
None,
R('node_cap', 'leakage') + RW('node_voltage', 'charge'),
R('node_cap', 'leakage') + RW('node_voltage', 'charge'),
RW],
return_type=pygion.void,
calling_convention='regent')
@task(task_id=2, replicable=True) # , inner=True
def main():
print_once('Running circuit_sparse.py')
conf = parse_args(pygion.input_args(True))
assert conf.num_pieces % conf.pieces_per_superpiece == 0, "pieces should be evenly distributed to superpieces"
conf.shared_nodes_per_piece = int(math.ceil(conf.nodes_per_piece * conf.pct_shared_nodes / 100.0))
print_once("circuit settings: loops=%d prune=%d pieces=%d (pieces/superpiece=%d) nodes/piece=%d (nodes/piece=%d) wires/piece=%d pct_in_piece=%d seed=%d" % (
conf.num_loops, conf.prune, conf.num_pieces, conf.pieces_per_superpiece, conf.nodes_per_piece,
conf.shared_nodes_per_piece, conf.wires_per_piece, conf.pct_wire_in_piece, conf.random_seed))
num_pieces = conf.num_pieces
num_superpieces = conf.num_pieces // conf.pieces_per_superpiece
num_circuit_nodes = num_pieces * conf.nodes_per_piece
num_circuit_wires = num_pieces * conf.wires_per_piece
node = Fspace(OrderedDict([
('node_cap', pygion.float32),
('leakage', pygion.float32),
('charge', pygion.float32),
('node_voltage', pygion.float32),
]))
wire = Fspace(OrderedDict([
('in_ptr', pygion.int64),
('in_ptr_r', pygion.uint8),
('out_ptr', pygion.int64),
('out_ptr_r', pygion.uint8),
('inductance', pygion.float32),
('resistance', pygion.float32),
('wire_cap', pygion.float32),
] + [
('current_%d' % i, pygion.float32) for i in range(WIRE_SEGMENTS)
] + [
('voltage_%d' % i, pygion.float32) for i in range(WIRE_SEGMENTS - 1)
]))
timestamp = Fspace(OrderedDict([
('start', pygion.int64),
('stop', pygion.int64),
]))
all_nodes = Region([num_circuit_nodes], node)
all_wires = Region([num_circuit_wires], wire)
all_times = Region([num_superpieces], timestamp)
node_size = np.dtype(list(map(lambda x: (x[0], x[1].numpy_type), node.field_types.items())), align=True).itemsize
wire_size = np.dtype(list(map(lambda x: (x[0], x[1].numpy_type), wire.field_types.items())), align=True).itemsize
print_once("Circuit memory usage:")
print_once(" Nodes : %10d * %4d bytes = %12d bytes" % (num_circuit_nodes, node_size, num_circuit_nodes * node_size))
print_once(" Wires : %10d * %4d bytes = %12d bytes" % (num_circuit_wires, wire_size, num_circuit_wires * wire_size))
total = ((num_circuit_nodes * node_size) + (num_circuit_wires * wire_size))
print_once(" Total %12d bytes" % total)
snpp = conf.shared_nodes_per_piece
pnpp = conf.nodes_per_piece - conf.shared_nodes_per_piece
pps = conf.pieces_per_superpiece
num_shared_nodes = num_pieces * snpp
privacy_coloring = Region([2], {'rect': pygion.rect1d})
np.copyto(
privacy_coloring.rect,
np.array([(num_shared_nodes, num_circuit_nodes - 1),
(0, num_shared_nodes - 1)],
dtype=privacy_coloring.rect.dtype),
casting='no')
privacy_part = Partition.restrict(privacy_coloring, [2], np.eye(1), [1], disjoint_complete)
all_nodes_part = Partition.image(all_nodes, privacy_part, 'rect', [2], disjoint_complete)
all_private = all_nodes_part[0]
all_shared = all_nodes_part[1]
launch_domain = Ispace([num_superpieces])
private_part = Partition.restrict(
        all_private, launch_domain, np.eye(1) * pnpp * pps, [pnpp * pps], disjoint_complete)  # trailing arguments are a hedged reconstruction; the source is truncated here
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 19 12:08:47 2021
@author: Kaneki
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 18 17:28:36 2021
@author: Kaneki
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
def Periodicity(pos,l):
if pos >= -l and pos <= l :
return pos
elif pos < -l:
return pos + 2*l
elif pos > l:
return pos - 2*l
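# Illustrative check of the periodic wrap (not from the source): with l = 1.0,
# Periodicity(1.2, 1.0) -> -0.8 and Periodicity(-1.3, 1.0) -> 0.7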
def ABP_move(t, dt, N, crit_coll_num, l):
coll_num = np.zeros((N, int(t/dt)))
for i in range(0, int(t/dt) - 1): # time evolution
# Collision
for p1 in range(0,N):
for p2 in range(p1,N):
if p1 == p2:
continue
# Collision criteria
r = np.sqrt((x[p1,i] - x[p2,i]) ** 2 + (y[p1,i] - y[p2,i]) ** 2)
if r > 2.1 * a:
continue
else:
coll_num[p1,i] += 1
coll_num[p2,i] += 1
for dum in range(len(coll_num)):
if coll_num[dum, i] >= crit_coll_num:
theta[dum,i] = theta[dum,i] + np.random.uniform(0,2*np.pi) # a random angle to avoid coll
dx = v * np.cos(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()
                dy = v * np.sin(theta[dum,i]) * dt + np.sqrt(2*Dt*dt) * np.random.randn()
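                # The two lines above are the overdamped active-Brownian step:
                # dr = v*(cos th, sin th)*dt + sqrt(2*Dt*dt)*xi, with xi ~ N(0, 1)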
import astropy.units as u
import numpy as np
from stixpy.data import test
from stixpy.science import *
def test_sciencedata_get_data():
l1 = ScienceData.from_fits(test.STIX_SCI_XRAY_CPD)
tot = l1.data['counts']
norm = (l1.data['timedel'].reshape(5, 1, 1, 1) * l1.dE)
rate = tot / norm
error = np.sqrt(tot*u.ct+l1.data['counts_err']**2) / norm
r, re, t, dt, e = l1.get_data()
assert np.allclose(rate, r)
assert np.allclose(error, re)
# Detector sum
tot = l1.data['counts'][:, 0:32, ...].sum(axis=1, keepdims=True)
norm = (l1.data['timedel'].reshape(5, 1, 1, 1) * l1.dE)
rate = tot / norm
error = np.sqrt(tot*u.ct+l1.data['counts_err'][:, 0:32, ...].sum(axis=1, keepdims=True)**2)/norm
r, re, t, dt, e = l1.get_data(detector_indices=[[0, 31]])
assert np.allclose(rate, r)
assert np.allclose(error, re, atol=1e-3)
# Pixel sum
tot = l1.data['counts'][..., 0:12, :].sum(axis=2, keepdims=True)
norm = (l1.data['timedel'].reshape(5, 1, 1, 1) * l1.dE)
rate = tot / norm
error = np.sqrt(tot * u.ct
+ l1.data['counts_err'][..., 0:12, :].sum(axis=2, keepdims=True)**2) / norm
r, re, t, dt, e = l1.get_data(pixel_indices=[[0, 11]])
assert np.allclose(rate, r)
assert np.allclose(error, re)
# Detector and Pixel sum
tot = l1.data['counts'][:, 0:32, 0:12, :].sum(axis=(1, 2), keepdims=True)
norm = (l1.data['timedel'].reshape(5, 1, 1, 1) * l1.dE)
rate = tot / norm
error = np.sqrt(tot*u.ct + l1.data['counts_err'][:, 0:32, 0:12, :].sum(axis=(1, 2),
keepdims=True)**2) / norm
r, re, t, dt, e = l1.get_data(pixel_indices=[[0, 11]], detector_indices=[[0, 31]])
assert np.allclose(rate, r)
assert np.allclose(error, re, atol=1e-3)
# Energy sum
tot = l1.data['counts'][..., 1:31].sum(axis=3, keepdims=True)
norm = (l1.data['timedel'].reshape(5, 1, 1, 1)
* (l1.energies[30]['e_high']-l1.energies[1]['e_low']))
rate = tot / norm
error = np.sqrt(tot*u.ct + l1.data['counts_err'][..., 1:31].sum(axis=3, keepdims=True)**2)/norm
r, re, t, dt, e = l1.get_data(energy_indices=[[1, 30]])
assert np.allclose(rate, r)
assert np.allclose(error, re, atol=1e-3)
# Time sum
tot = l1.data['counts'][:, ...].sum(axis=0, keepdims=True)
norm = (l1.data['timedel'].sum() * l1.dE)
rate = tot / norm
error = np.sqrt(tot * u.ct + l1.data['counts_err'][:, ...].sum(axis=0, keepdims=True) ** 2)/norm
r, re, t, dt, e = l1.get_data(time_indices=[[0, 4]])
assert np.allclose(rate, r)
assert np.allclose(error, re)
# Sum everything down to one number
tot = l1.data['counts'][..., 1:31].sum(keepdims=True)
norm = (l1.data['timedel'].sum() * (l1.energies[30]['e_high'] - l1.energies[1]['e_low']))
rate = tot/norm
error = np.sqrt(tot * u.ct + l1.data['counts_err'][..., 1:31].sum(keepdims=True) ** 2) / norm
r, re, t, dt, e = l1.get_data(time_indices=[[0, 4]], energy_indices=[[1, 30]],
pixel_indices=[[0, 11]], detector_indices=[[0, 31]])
    assert np.allclose(rate, r)
import cv2
import numpy as np
import math
from PIL import Image
import random
class DIP:
def __init__(self):
pass
def read(self, file):
return np.array(Image.open(file))
def save(self, file, image):
return cv2.imwrite(file, image )
def resize(self, image, size):
return cv2.resize(image, (size[0], size[1]))
def cvtGreyscale(self, image):
grey = np.dot(image[...,:3], [0.2989, 0.5870, 0.114])
grey /= np.max(grey)
return grey
def gaussianKernel(self, kernelSize, sigma, flag=True, BilSpatial=None):
normal = 1 / (2.0 * np.pi * sigma * sigma)
if flag:
center = kernelSize // 2
x, y = np.mgrid[-center:center + 1, -center:center + 1]
kernel = np.exp(-((x * x + y * y) / (2.0 * sigma * sigma))) * normal
else:
kernel = np.exp(-(kernelSize*kernelSize / (2.0 * sigma * sigma)))
kernel = np.multiply(kernel, BilSpatial)
return kernel
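    # Note: when flag is False, 'kernelSize' is reused as an array of intensity
    # differences, so gaussianKernel returns the bilateral *range* weights
    # multiplied elementwise by the precomputed spatial kernel (BilSpatial).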
def gaussianFilter(self, image, kernelSize, sigma):
gKernel = self.gaussianKernel(kernelSize, sigma)
        output = np.zeros(image.shape, np.float64)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
output[col, row] = np.sum(gKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
output /= np.max(output)
return output
def gabf(self, image, kernelSize, sigmaS, sigmaR):
spatialKernel = self.gaussianKernel(kernelSize, sigmaS)
        LP_guide = np.zeros(image.shape, np.float64)
        output = np.zeros(image.shape, np.float64)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
LP_guide[col, row] = np.sum(spatialKernel * padded_image[col:col + kernelSize, row:row + kernelSize])
LP_guide /= np.max(LP_guide)
padded_image = np.pad(LP_guide, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
neighb_win = padded_image[col:col + kernelSize, row:row + kernelSize]
intensity_diff = np.absolute(image[col, row] - neighb_win)
weights = self.gaussianKernel(intensity_diff, sigmaR, flag=False, BilSpatial=spatialKernel)
vals = np.sum(np.multiply(weights, neighb_win))
norm = np.sum(weights)
output[col, row] = np.divide(vals, norm, out=np.zeros_like(vals), where=norm != 0)
output /= np.max(output)
return output
def median(self, image, kernelSize):
        output = np.zeros(image.shape, np.float64)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]):
for col in range(image.shape[0]):
neighb_win = padded_image[col:col + kernelSize, row:row + kernelSize]
output[col, row] = np.median(neighb_win)
output /= np.max(output)
return output
def gradient2x2(self, image):
kernelSize = 2
gX = np.array([
[-1, 1],
[-1, 1]
])
gY = np.array([
[1, 1],
[-1, -1]
])
        G_x = np.zeros(image.shape, np.float64)
        G_y = np.zeros(image.shape, np.float64)
padded_image = np.pad(image, int((kernelSize - 1) / 2), 'edge')
for row in range(image.shape[1]): # loop through row
for col in range(image.shape[0]): # loop through col
pix = padded_image[col:col + kernelSize, row:row + kernelSize] # get pixel value
G_x[col, row] = np.sum(np.multiply(gX, pix))
G_y[col, row] = np.sum(np.multiply(gY, pix))
filtered_image = np.hypot(G_x, G_y)
angle_image = np.arctan2(G_y, G_x)
filtered_image /= np.max(filtered_image)
return filtered_image, angle_image
def nonMax_Supp(self, image, angle):
output = np.zeros(image.shape, np.float64)
angle = np.rad2deg(angle)
angle[angle < 0] += 180
for row in range(1, image.shape[1] - 1): # loop through row
for col in range(1, image.shape[0] - 1): # loop through col
if image[col, row] == 0:
continue
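                # angle 0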
if (0 <= angle[col, row] < 22.5) or (157.5 <= angle[col, row] <= 180):
adj_pix = max(image[col, row + 1], image[col, row - 1])
# angle 45
elif (22.5 <= angle[col, row] < 67.5):
adj_pix = max(image[col + 1, row - 1], image[col - 1, row + 1])
# angle 90
elif (67.5 <= angle[col, row] < 112.5):
adj_pix = max(image[col + 1, row], image[col - 1, row])
# angle 135
elif (112.5 <= angle[col, row] < 157.5):
adj_pix = max(image[col - 1, row - 1], image[col + 1, row + 1])
if image[col, row] >= adj_pix:
output[col, row] = image[col, row]
# else:
# output[col, row] = 0
output /= np.max(output)
output *= 255
return output.astype(np.uint8)
def thresholding(self, image, thresH, thresL):
output = np.zeros(image.shape, np.uint8)
output[image >= thresH] = 255
output[(image < thresH) & (image >= thresL)] = 100
return output
def hysteresis(self, image, nms=None):
connect = True
marker = np.full(image.shape, False)
while connect:
connect = False
for row in range(image.shape[1]):
for col in range(image.shape[0]):
if (image[col, row]==255) and not marker[col,row]:
marker[col, row] = True
try:
if image[col+1, row-1] == 100:
image[col + 1, row - 1] = 255
connect = True
if image[col+1, row] == 100:
image[col + 1, row] = 255
connect = True
if image[col+1, row+1] == 100:
image[col+1, row+1] = 255
connect = True
if image[col, row-1] == 100:
image[col, row - 1] = 255
connect = True
if image[col, row+1] == 100:
image[col, row + 1] = 255
connect = True
if image[col-1, row-1] == 100:
image[col - 1, row - 1] = 255
connect = True
if image[col-1, row] == 100:
image[col - 1, row] = 255
connect = True
if image[col-1, row+1] == 100:
image[col - 1, row + 1] = 255
connect = True
except IndexError as e:
pass
image[image < 255] = 0
if type(nms)==np.ndarray:
nms[image==0] = 0
return image, nms
def chainFormation(self, image, nms):
h, w = image.shape
for col in range(h): # loop through col
for row in range(w): # loop through row
if image[col, row] == 0: # centre aldy zero
continue
                elif 1 <= col < h - 2 and 1 <= row < w - 2 and np.count_nonzero(image[col - 1:col + 2, row - 1:row + 2] == 255) == 1:  # isolated point, no need to compare
image[col, row] = 0
image = image.astype('int32')
image[image == 255] = np.count_nonzero(image == 255)
key = 1 # initial key
NewKey = 1 #
again = True
direction = 1
found = 0
temp_grad = 0
info = []
while (again):
again = False
if direction == 1:
startR, stopR, stepR = 0, w, 1
else:
startR, stopR, stepR = w - 1, -1, -1
currentCol = h - 2
for col in range(h): # loop through col
if again:
break
for row in range(startR, stopR, stepR): # loop through row
if image[col, row] <= key: # skip zero and traced edge
continue
if key < NewKey:
if image[col - 1, row - 1] == key or image[col, row - 1] == key or image[col + 1, row - 1] == key or \
image[col - 1, row] == key or image[col + 1, row] == key or \
image[col - 1, row + 1] == key or image[col, row + 1] == key or image[col + 1, row + 1] == key:
image[col, row] = key
temp_grad += nms[col, row] # accumulate gradient of edge chain
currentCol = col
elif key == NewKey: # intialize and assign new key
image[col, row] = key
NewKey += 1
temp_grad += nms[col, row] # accumulate gradient of edge chain
currentCol = col
if col > currentCol:
again = True
currentFound = np.count_nonzero(image == key) - found
found += currentFound
direction *= -1
if currentFound == 0:
if np.count_nonzero(image == key) == 0:
# print('no more key found')
again = False
break
temp_grad /= found
info.append((key, found, temp_grad)) ### key, edge_length, mean local max
key += 1 # end search of current key
found = 0 # restart count of edgel per chain
direction = 1 # always start forward
temp_grad = 0 # reset local max accumulator
print('reassign key ...', key)
output = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
for k in range(1, key):
output[image == k] = (random.randrange(75, 256), random.randrange(75, 256), random.randrange(75, 256))
### key, edge_length, mean local max
infoArr = np.array(info)
meanEdgeLength = np.mean(infoArr[:, 1])
        meanLocalMax = np.mean(infoArr[:, 2])
# -*- coding: utf-8 -*-
import numpy as np
import scipy
import scipy.linalg
import scipy.optimize
import scipy.spatial
def vector(x, y, z):
""" A shortcut for creating 3D-space vectors;
in case you need a lot of manual np.array([...]) """
return np.array([x, y, z])
def deg2rad(deg):
""" Convert degrees (input) to radians """
return deg*np.pi/180.
def rad2deg(rad):
""" convert radians (input) to degrees """
return rad*180./np.pi
def norm(vector):
""" a shortcut to scipy.linalg.norm() """
return scipy.linalg.norm(vector)
def unit_vector(vector):
""" Returns a vector of magnitude 1 with the same direction"""
return vector / norm(vector)
def angle_between(v1, v2):
""" Returns the angle between vectors 'v1' and 'v2', in radians:
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
Kudos: https://stackoverflow.com/questions/2827393/
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def arbitrary_rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
# Kudos to
# https://stackoverflow.com/questions/6802577/rotation-of-3d-vector
#import math
#
# axis = np.asarray(axis)
# axis = axis / math.sqrt(np.dot(axis, axis))
# a = math.cos(theta / 2.0)
# b, c, d = -axis * math.sin(theta / 2.0)
# aa, bb, cc, dd = a * a, b * b, c * c, d * d
# bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
# return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
# [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
# [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
# Also Kudos to the guy with another answer for the same question (used here): """
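    # np.cross(np.eye(3), u*theta) builds the skew-symmetric generator [theta*u]_x,
    # so the matrix exponential below is exactly Rodrigues' rotation by theta about u.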
return scipy.linalg.expm(np.cross(np.eye(3), axis/norm(axis)*theta))
def arbitrary_rotation(point, axis, theta, origin):
""" Rotate a point around any axis given by axis by angle theta [radians] """
rotated_point = np.dot(arbitrary_rotation_matrix(axis, theta), point - origin)
return rotated_point + origin
def rotate(point, angle, axis='x'):
""" Rotate a point around a given axis by specified angle """
if axis == 'y':
axis = vector(0, 1, 0)
elif axis == 'z':
axis = vector(0, 0, 1)
elif axis == 'x':
axis = vector(1, 0, 0)
else:
raise ValueError("Rotation axis should be either 'x', 'y', or 'z' ")
return arbitrary_rotation(point, axis, angle, vector(0, 0, 0))
def to_polar(point, axis='z'):
""" Convert (x, y, z) point to (radius, angle, height);
the axis of the new polar coordinate system can be chosen ('x' or 'z') """
assert axis in ['x', 'z']
if axis == 'z':
radius = (point[0]**2 + point[1]**2)**0.5
angle = np.arctan2(point[1], point[0])
height = point[2]
else: # axis == 'x'
radius = (point[1]**2 + point[2]**2)**0.5
angle = np.arctan2(point[2], point[1])
height = point[0]
return vector(radius, angle, height)
def to_cartesian(p, direction=1, axis='z'):
""" Converts a point given in (r, theta, z) coordinates to
cartesian coordinate system.
optionally, axis can be aligned with either cartesian axis x* or z and
rotation sense can be inverted with direction=-1
*when axis is 'x': theta goes from 0 at y-axis toward z-axis
"""
assert direction in [-1, 1]
assert axis in ['x', 'z']
radius = p[0]
angle = direction*p[1]
height = p[2]
if axis == 'z':
return vector(radius*np.cos(angle), radius*np.sin(angle), height)
# axis == 'x'
return vector( height, radius*np.cos(angle), radius*np.sin(angle) )
def lin_map(x, x_min, x_max, out_min, out_max, limit=False):
""" map x that should take values from x_min to x_max
to values out_min to out_max"""
r = float(x - x_min) * float(out_max - out_min) / \
float(x_max - x_min) + float(out_min)
if limit:
return sorted([out_min, r, out_max])[1]
else:
return r
def xy_line_intersection(p_1, p_2, p_3, p_4):
""" p_1 and p_2 define the first line, p_3 and p_4 define the second;
return a point of intersection between these two lines in x-y plane
Kudos: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line
"""
# only take x and y coordinates
x1 = p_1[0]
y1 = p_1[1]
x2 = p_2[0]
y2 = p_2[1]
x3 = p_3[0]
y3 = p_3[1]
x4 = p_4[0]
y4 = p_4[1]
def det(p1, p2, p3, p4):
return np.linalg.det(np.array([[p1, p2], [p3, p4]]))
Dx1 = det(x1, y1, x2, y2)
Dx2 = det(x1, 1, x2, 1)
Dx3 = det(x3, y3, x4, y4)
Dx4 = det(x3, 1, x4, 1)
Dx5 = Dx2
Dx6 = det(y1, 1, y2, 1)
Dx7 = Dx4
Dx8 = det(y3, 1, y4, 1)
# x-coordinate
Px = det(Dx1, Dx2, Dx3, Dx4)/det(Dx5, Dx6, Dx7, Dx8)
# y-coordinate
Dy1 = Dx1
Dy2 = Dx6
Dy3 = Dx3
Dy4 = Dx8
Dy5 = Dx2
Dy6 = Dx6
Dy7 = Dx7
Dy8 = Dx8
Py = det(Dy1, Dy2, Dy3, Dy4)/det(Dy5, Dy6, Dy7, Dy8)
return vector(Px, Py, 0)
# alternative solution with vectors
# A = np.array([
# [p_2[0] - p_1[0], p_4[0] - p_3[0]],
# [p_2[1] - p_1[1], p_4[1] - p_3[1]],
# ])
#
# b = np.array([p_3[0] - p_1[0], p_3[1] - p_1[1]])
#
# k1k2 = np.linalg.solve(A, b)
# k1 = k1k2[0]
# k2 = k1k2[1]
#
# va = vector(
# p_1[0] + k1*(p_2[0] - p_1[0]),
# p_1[1] + k1*(p_2[1] - p_1[1]),
# 0
# )
#
# vb = vector(
# p_3[0] + k2*(p_4[0] - p_3[0]),
# p_3[1] + k2*(p_4[1] - p_3[1]),
# 0
# )
#
# print(P-va, P-vb, norm(va-vb))
# return va
def extend_to_y(p_1, p_2, y):
""" Return a point that lies on a line defined by p_1 and p_2 and on y=y; only in xy-plane! """
fk_3 = lambda k: p_1[1] + k*(p_2 - p_1)[1] - y
k_3 = scipy.optimize.newton(fk_3, 0)
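    # fk_3 is linear in k, so Newton's method converges in a single step;
    # the closed form would be k = (y - p_1[1]) / (p_2[1] - p_1[1])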
return p_1 + k_3*(p_2 - p_1)
def arc_length_3point(A, B, C):
""" Returns length of arc defined by 3 points, A, B and C; B is the point in between """
A = np.asarray(A)
B = np.asarray(B)
    C = np.asarray(C)
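    # --- hedged reconstruction: the remainder of this function is truncated in the
    # source; below is a standard circumradius / inscribed-angle sketch ---
    ab = norm(B - A)
    bc = norm(C - B)
    ca = norm(A - C)
    area = norm(np.cross(B - A, C - A)) / 2.0
    if area < 1e-12:
        # collinear points: the 'arc' degenerates to the straight segment A-B-C
        return ab + bc
    radius = ab * bc * ca / (4.0 * area)
    # the arc that passes through B subtends 2*pi - 2*angle(ABC) at the centre
    return radius * (2.0 * np.pi - 2.0 * angle_between(A - B, C - B))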
'''
Comparison of Continuation/Generalized-inverse (C/Ginv) method
and Continuation/Generalized Minimum RESidual (C/GMRES) method
Two-link Arm system
Made in Feb. 2022 ver. 0.1
Fer. 2022 ver. 0.1.1
Bug fixed.
BSD 2-Clause License
Copyright (c) 2022, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
import matplotlib.pyplot as plt
from CGinv import C_Ginv
from CGMRES import C_GMRES
import time
###########################
## simulation parameters ##
###########################
##################################
## common simulation parameters ##
##################################
state_dim=4 # state dimension
input_dim=2 # input dimension
t0=0.0 # initial time [s]
N=4 # Integration steps within the MPC computation
dt=0.01 # Time step for evolution of actual time [s]
Tf=1.0 # Simulation duration [s]
max_iter=int((Tf-t0)/dt)+1 # iteration of simulation (for loop iteration)
####################
## Initial state ##
####################
x_init = np.zeros(state_dim)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_treeinterpreter
----------------------------------
Tests for `treeinterpreter` module.
"""
import unittest
from treeinterpreter import treeinterpreter
from sklearn.datasets import load_boston, load_iris
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
import numpy as np
class TestTreeinterpreter(unittest.TestCase):
def setUp(self):
self.boston = load_boston()
self.iris = load_iris()
def test_tree_regressor(self):
X = self.boston.data
Y = self.boston.target
        testX = X[len(X) // 2:]
#Predict for decision tree
dt = DecisionTreeRegressor()
        dt.fit(X[:len(X) // 2], Y[:len(X) // 2])
base_prediction = dt.predict(testX)
pred, bias, contrib = treeinterpreter.predict(dt, testX)
        self.assertTrue(np.allclose(base_prediction, pred))
import numpy as np
import numpy.matlib  # np.matlib is not available without importing numpy.matlib explicitly
from numpy.linalg import LinAlgError
class EigenGrasp(object):
def __init__(self, size, eigenval = 0.0, min=0.0, max=0.0):
self._size = size
self._eigenval = eigenval
self._min = min
self._max = max
self._amp = 0.0
self._vals = [0.0] * size
def setOnes(self):
vals = [1.0] * self._size
self.setVals(vals)
def setRange(self, min, max):
self._min = min
self._max = max
def getAxisVal(self, i):
return self._vals[i]
def setAxisVal(self, i, val):
self._vals[i] = val
def getVals(self):
return self._vals
def setVals(self, vals):
if (len(vals) != self._size):
print("ERROR: EigenGrasp(vals), len(vals) != self._size", len(vals), self._size)
return
for i in range(len(vals)):
self._vals[i] = vals[i]
class EigenGraspInterface(object):
def __init__(self, robot, eigen_grasps, originVals):
self._robot = robot
self._dSize = robot.getDofsCount() # dof joint space dimension size
self._eigen_grasps = eigen_grasps
self._eSize = len(eigen_grasps) # eigengrasp space dimension size
self._eg_origin = EigenGrasp(self._dSize)
self._eg_origin.setVals(originVals)
self._norm = EigenGrasp(self._dSize)
self._norm.setOnes()
self._mP = np.matlib.zeros((self._eSize, self._dSize))
self._mPInv = np.matlib.zeros((self._dSize, self._eSize))
# ---------------------------------------------------------------
# Compute Projection Matrix between dof & eg spaces.
self.computeProjectionMatrices()
# Set the min max values for all eigengrasps
self.setEigenGraspsMinMax()
def setEigenGraspsMinMax(self):
# EIGENGRASP_LOOSE
GB_EG_MIN = +1.0e5
GB_EG_MAX = -1.0e5
# mmin = -1.0e5;
# mmax = +1.0e5;
dofs = self._robot.getCurrentDofs()
amps = self.toEigenGrasp(dofs)
for e in range(self._eSize):
eg_vals = self._eigen_grasps[e].getVals()
eg_min, eg_max = GB_EG_MAX, GB_EG_MIN
for d in range(self._dSize):
if(eg_vals[d] == 0 or self._eg_origin.getAxisVal(d) == 0):
continue
dof_min, dof_max = self._robot.getDOFRange(d)
eg_min = (dof_min - dofs[d]) / (eg_vals[d] * self._eg_origin.getAxisVal(d))
eg_max = (dof_max - dofs[d]) / (eg_vals[d] * self._eg_origin.getAxisVal(d))
if(eg_min > eg_max):
eg_min, eg_max = eg_max, eg_min
#x = x + y
#y = x - y
#x = x - y
if(eg_min < GB_EG_MIN):
GB_EG_MIN = eg_min
if(eg_max > GB_EG_MAX):
GB_EG_MAX = eg_max
self._eigen_grasps[e].setRange(eg_min, eg_max)
def checkOrigin(self):
for d in range(self._dSize):
dof_min, dof_max = self._robot.getDOFRange(d)
if(self._eg_origin.getAxisVal(d) < dof_min):
print("WARNING: Eigengrasp origin lower than DOF range:", d);
self._eg_origin.setAxisVal(d, dof_min)
if(self._eg_origin.getAxisVal(d) > dof_max):
print("WARNING: WARNING: Eigengrasp origin greater than DOF:", d);
self._eg_origin.setAxisVal(d, dof_max)
def computeProjectionMatrices(self):
E = np.matlib.zeros((self._eSize, self._dSize))
for e in range(self._eSize):
for d in range(self._dSize):
E[e,d] = self._eigen_grasps[e].getAxisVal(d)
# --------------------------------------------------
ET = E.transpose()
EET = np.matlib.zeros((self._eSize, self._eSize))
EET = np.dot(E, ET)
try:
EETInv = np.linalg.inv(EET)
except LinAlgError as err:
return
self._mP = np.dot(EETInv, E)
self._mPInv = ET
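        # _mP = (E E^T)^-1 E is the least-squares projection from (normalized) dof
        # space onto eigengrasp amplitudes; _mPInv = E^T maps amplitudes back.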
def toDOF(self, amps):
if(len(amps) != self._eSize):
print('ERROR: toDOF-Invalid amplitudes!', len(amps), self._eSize)
return None
dofs = [0.0] * self._dSize
a = np.asmatrix(amps).transpose()
x = np.matlib.zeros((self._dSize,1))
x = np.dot(self._mPInv, a)
for d in range(self._dSize):
dofs[d] = x[d,0] * self._norm.getAxisVal(d) + self._eg_origin.getAxisVal(d)
return dofs
def toEigenGrasp(self, dofs):
if(len(dofs) != self._dSize):
print('ERROR: toEigenGrasp-Invalid dofs!', len(dofs), self._dSize)
return
amps = [0.0] * self._eSize
        x = np.matlib.zeros((self._dSize,1))
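        # --- hedged reconstruction (source truncated here): mirror of toDOF,
        # i.e. subtract the origin, normalize, then project with _mP ---
        d = np.matlib.zeros((self._dSize, 1))
        for i in range(self._dSize):
            d[i, 0] = (dofs[i] - self._eg_origin.getAxisVal(i)) / self._norm.getAxisVal(i)
        x = np.dot(self._mP, d)
        for e in range(self._eSize):
            amps[e] = x[e, 0]
        return amps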
import argparse
import numpy as np
import sys
from collections import Counter
from scipy.sparse import csr_matrix, lil_matrix, vstack
from scipy.stats import gamma
# Read in vocabulary from file.
def get_vocab(vocab_fn, ignore_case):
vocab = []
vocab_index = {}
for i, line in enumerate(open(vocab_fn, mode='r', encoding='utf-8')):
term = line.strip()
if ignore_case:
term = term.lower()
vocab.append(term)
vocab_index[term] = i
return vocab, vocab_index
# From input corpus in_tsv and the index of working vocabulary vocab_index
# construct:
# authors: working list of authors
# author_doc_ids: mapping of authors to document ids
# doc_term_matrix: document-term matrix
def process_corpus(in_tsv, vocab_index, ignore_case, verbose):
vocab_size = len(vocab_index)
authors_by_doc = []
doc_vectors = []
n_lines = sum(1 for line in open(in_tsv))
reader = open(in_tsv, mode='r', encoding='utf-8')
for i, line in enumerate(reader):
if verbose and i and i % 1000 == 0:
print('{}/{}'.format(i, n_lines), file=sys.stderr)
fields = line.strip().split('\t')
authors_by_doc.append(fields[1])
vector = lil_matrix((1, vocab_size))
tokens = fields[2].split()
if ignore_case:
tokens = [t.lower() for t in tokens]
term_counts = Counter(tokens)
for term in term_counts:
if term in vocab_index:
col = vocab_index[term]
vector[0, col] = term_counts[term]
doc_vectors.append(vector)
doc_term_matrix = vstack(doc_vectors, format='csr')
authors = sorted(list(set(authors_by_doc)))
author_index = {author: i for i, author in enumerate(authors)}
author_doc_ids = {author: [] for author in authors}
for i, a in enumerate(authors_by_doc):
author_doc_ids[a].append(i)
return authors, author_index, author_doc_ids, doc_term_matrix
# Construct author-term matrix from document-term matrix.
def get_author_term_matrix(authors, author_doc_ids, doc_term_matrix):
author_vectors = [csr_matrix(doc_term_matrix[doc_ids].sum(axis=0)) for
doc_ids in author_doc_ids.values()]
author_term_matrix = vstack(author_vectors, format='csc')
return author_term_matrix
# Estimate gamma parameters k, theta using method of moments
def get_gamma_parameters(author_term_freqs):
term_means = np.mean(author_term_freqs, axis=0).getA1()
term_vars = np.var(author_term_freqs, axis=0, ddof=1).getA1()
    ks = np.divide(np.square(term_means), term_vars)
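    # --- hedged reconstruction (source truncated here): the method of moments
    # for a gamma distribution gives k = mean^2/var and theta = var/mean ---
    thetas = np.divide(term_vars, term_means)
    return ks, thetas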
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml
import numpy as np
import pickle
import pytest
from cuml.tsa.arima import ARIMA
from cuml.test.utils import array_equal, unit_param, stress_param, \
ClassEnumerator, get_classes_from_package
from cuml.test.test_svm import compare_svm, compare_probabilistic_svm
from sklearn.base import clone
from sklearn.datasets import load_iris, make_classification, make_regression
from sklearn.manifold.t_sne import trustworthiness
from sklearn.model_selection import train_test_split
regression_config = ClassEnumerator(module=cuml.linear_model)
regression_models = regression_config.get_models()
solver_config = ClassEnumerator(
module=cuml.solvers,
# QN uses softmax here because some of the tests uses multiclass
# logistic regression which requires a softmax loss
custom_constructors={"QN": lambda: cuml.QN(loss="softmax")}
)
solver_models = solver_config.get_models()
cluster_config = ClassEnumerator(
module=cuml.cluster,
exclude_classes=[cuml.DBSCAN,
cuml.AgglomerativeClustering]
)
cluster_models = cluster_config.get_models()
decomposition_config = ClassEnumerator(module=cuml.decomposition)
decomposition_models = decomposition_config.get_models()
decomposition_config_xfail = ClassEnumerator(module=cuml.random_projection)
decomposition_models_xfail = decomposition_config_xfail.get_models()
neighbor_config = ClassEnumerator(module=cuml.neighbors)
neighbor_models = neighbor_config.get_models()
dbscan_model = {"DBSCAN": cuml.DBSCAN}
agglomerative_model = {"AgglomerativeClustering": cuml.AgglomerativeClustering}
umap_model = {"UMAP": cuml.UMAP}
rf_module = ClassEnumerator(module=cuml.ensemble)
rf_models = rf_module.get_models()
k_neighbors_config = ClassEnumerator(module=cuml.neighbors, exclude_classes=[
cuml.neighbors.NearestNeighbors])
k_neighbors_models = k_neighbors_config.get_models()
unfit_pickle_xfail = [
'ARIMA',
'AutoARIMA',
'KalmanFilter',
'BaseRandomForestModel',
'ForestInference',
'MulticlassClassifier',
'OneVsOneClassifier',
'OneVsRestClassifier'
]
unfit_clone_xfail = [
'AutoARIMA',
"ARIMA",
"BaseRandomForestModel",
"GaussianRandomProjection",
'MulticlassClassifier',
'OneVsOneClassifier',
'OneVsRestClassifier',
"SparseRandomProjection",
]
all_models = get_classes_from_package(cuml, import_sub_packages=True)
all_models.update({
**regression_models,
**solver_models,
**cluster_models,
**decomposition_models,
**decomposition_models_xfail,
**neighbor_models,
**dbscan_model,
**agglomerative_model,
**umap_model,
**rf_models,
**k_neighbors_models,
'ARIMA': lambda: ARIMA(np.random.normal(0.0, 1.0, (10,))),
'ExponentialSmoothing':
lambda: cuml.ExponentialSmoothing(np.array([-217.72, -206.77])),
})
def pickle_save_load(tmpdir, func_create_model, func_assert):
model, X_test = func_create_model()
pickle_file = tmpdir.join('cu_model.pickle')
try:
with open(pickle_file, 'wb') as pf:
pickle.dump(model, pf)
except (TypeError, ValueError) as e:
pf.close()
pytest.fail(e)
del model
with open(pickle_file, 'rb') as pf:
cu_after_pickle_model = pickle.load(pf)
func_assert(cu_after_pickle_model, X_test)
def make_classification_dataset(datatype, nrows, ncols, n_info, n_classes):
X, y = make_classification(n_samples=nrows, n_features=ncols,
n_informative=n_info,
n_classes=n_classes,
random_state=0)
X = X.astype(datatype)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
return X_train, y_train, X_test
def make_dataset(datatype, nrows, ncols, n_info):
X, y = make_regression(n_samples=nrows, n_features=ncols,
n_informative=n_info, random_state=0)
X = X.astype(datatype)
y = y.astype(datatype)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
return X_train, y_train, X_test
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('key', rf_models.keys())
@pytest.mark.parametrize('nrows', [unit_param(500)])
@pytest.mark.parametrize('ncols', [unit_param(16)])
@pytest.mark.parametrize('n_info', [unit_param(7)])
@pytest.mark.parametrize('n_classes', [unit_param(2), unit_param(5)])
def test_rf_regression_pickle(tmpdir, datatype, nrows, ncols, n_info,
n_classes, key):
result = {}
if datatype == np.float64:
pytest.xfail("Pickling is not supported for dataset with"
" dtype float64")
def create_mod():
if key == 'RandomForestRegressor':
X_train, y_train, X_test = make_dataset(datatype,
nrows,
ncols,
n_info)
else:
X_train, y_train, X_test = make_classification_dataset(datatype,
nrows,
ncols,
n_info,
n_classes)
model = rf_models[key]()
model.fit(X_train, y_train)
if datatype == np.float32:
predict_model = "GPU"
else:
predict_model = "CPU"
result["rf_res"] = model.predict(X_test,
predict_model=predict_model)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["rf_res"], pickled_model.predict(X_test))
# Confirm no crash from score
pickled_model.score(X_test, np.zeros(X_test.shape[0]),
predict_model="GPU")
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', regression_models.keys())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
@pytest.mark.parametrize('fit_intercept', [True, False])
def test_regressor_pickle(tmpdir, datatype, keys, data_size, fit_intercept):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
if "LogisticRegression" in keys and nrows == 500000:
nrows, ncols, n_info = (nrows // 20, ncols // 20, n_info // 20)
X_train, y_train, X_test = make_dataset(datatype, nrows,
ncols, n_info)
if "MBSGD" in keys:
model = regression_models[keys](fit_intercept=fit_intercept,
batch_size=nrows/100)
else:
model = regression_models[keys](fit_intercept=fit_intercept)
model.fit(X_train, y_train)
result["regressor"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["regressor"], pickled_model.predict(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', solver_models.keys())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
def test_solver_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
if "QN" in keys and nrows == 500000:
nrows, ncols, n_info = (nrows // 20, ncols // 20, n_info // 20)
X_train, y_train, X_test = make_dataset(datatype, nrows,
ncols, n_info)
model = solver_models[keys]()
model.fit(X_train, y_train)
result["solver"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["solver"], pickled_model.predict(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', cluster_models.keys())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
def test_cluster_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, y_train, X_test = make_dataset(datatype, nrows,
ncols, n_info)
model = cluster_models[keys]()
model.fit(X_train)
result["cluster"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
assert array_equal(result["cluster"], pickled_model.predict(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', decomposition_models_xfail.values())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
@pytest.mark.xfail
def test_decomposition_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, y_train, X_test = make_dataset(datatype, nrows,
ncols, n_info)
model = decomposition_models_xfail[keys]()
result["decomposition"] = model.fit_transform(X_train)
return model, X_train
def assert_model(pickled_model, X_test):
assert array_equal(result["decomposition"],
pickled_model.transform(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', umap_model.keys())
def test_umap_pickle(tmpdir, datatype, keys):
result = {}
def create_mod():
X_train = load_iris().data
model = umap_model[keys](output_type="numpy")
cu_before_pickle_transform = model.fit_transform(X_train)
result["umap_embedding"] = model.embedding_
n_neighbors = model.n_neighbors
result["umap"] = trustworthiness(X_train,
cu_before_pickle_transform,
n_neighbors)
return model, X_train
def assert_model(pickled_model, X_train):
cu_after_embed = pickled_model.embedding_
n_neighbors = pickled_model.n_neighbors
assert array_equal(result["umap_embedding"], cu_after_embed)
cu_trust_after = trustworthiness(X_train,
pickled_model.transform(X_train),
n_neighbors)
assert cu_trust_after >= result["umap"] - 0.2
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', decomposition_models.keys())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
@pytest.mark.xfail
def test_decomposition_pickle_xfail(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, _, _ = make_dataset(datatype, nrows,
ncols, n_info)
model = decomposition_models[keys]()
result["decomposition"] = model.fit_transform(X_train)
return model, X_train
def assert_model(pickled_model, X_test):
assert array_equal(result["decomposition"],
pickled_model.transform(X_test))
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('model_name',
all_models.keys())
def test_unfit_pickle(model_name):
# Any model xfailed in this test cannot be used for hyperparameter sweeps
# with dask or sklearn
if (model_name in decomposition_models_xfail.keys() or
model_name in unfit_pickle_xfail):
pytest.xfail()
# Pickling should work even if fit has not been called
mod = all_models[model_name]()
mod_pickled_bytes = pickle.dumps(mod)
mod_unpickled = pickle.loads(mod_pickled_bytes)
assert mod_unpickled is not None
@pytest.mark.parametrize('model_name',
all_models.keys())
def test_unfit_clone(model_name):
if model_name in unfit_clone_xfail:
pytest.xfail()
# Cloning runs into many of the same problems as pickling
mod = all_models[model_name]()
clone(mod)
# TODO: check parameters exactly?
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', neighbor_models.keys())
@pytest.mark.parametrize('data_info', [unit_param([500, 20, 10, 5]),
stress_param([500000, 1000, 500, 50])])
def test_neighbors_pickle(tmpdir, datatype, keys, data_info):
result = {}
def create_mod():
nrows, ncols, n_info, k = data_info
X_train, y_train, X_test = make_dataset(datatype, nrows, ncols, n_info)
model = neighbor_models[keys]()
if keys in k_neighbors_models.keys():
model.fit(X_train, y_train)
else:
model.fit(X_train)
result["neighbors_D"], result["neighbors_I"] = \
model.kneighbors(X_test, n_neighbors=k)
return model, X_test
def assert_model(pickled_model, X_test):
D_after, I_after = pickled_model.kneighbors(X_test,
n_neighbors=data_info[3])
assert array_equal(result["neighbors_D"], D_after)
assert array_equal(result["neighbors_I"], I_after)
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('data_info', [unit_param([500, 20, 10, 3, 5]),
stress_param([500000, 1000, 500, 10,
50])])
@pytest.mark.parametrize('keys', k_neighbors_models.keys())
def test_k_neighbors_classifier_pickle(tmpdir, datatype, data_info, keys):
result = {}
def create_mod():
nrows, ncols, n_info, n_classes, k = data_info
X_train, y_train, X_test = make_classification_dataset(datatype,
nrows,
ncols,
n_info,
n_classes)
model = k_neighbors_models[keys](n_neighbors=k)
model.fit(X_train, y_train)
result["neighbors"] = model.predict(X_test)
return model, X_test
def assert_model(pickled_model, X_test):
D_after = pickled_model.predict(X_test)
assert array_equal(result["neighbors"], D_after)
state = pickled_model.__dict__
assert state["n_indices"] == 1
assert "X_m" in state
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('data_info', [unit_param([500, 20, 10, 5]),
stress_param([500000, 1000, 500, 50])])
def test_neighbors_pickle_nofit(tmpdir, datatype, data_info):
result = {}
"""
.. note:: This test digs down a bit far into the
internals of the implementation, but it's
important that regressions do not occur
from changes to the class.
"""
def create_mod():
nrows, ncols, n_info, k = data_info
X_train, _, X_test = make_dataset(datatype, nrows, ncols, n_info)
model = cuml.neighbors.NearestNeighbors()
result["model"] = model
return model, [X_train, X_test]
def assert_model(loaded_model, X):
state = loaded_model.__dict__
assert state["n_indices"] == 0
assert "X_m" not in state
loaded_model.fit(X[0])
state = loaded_model.__dict__
assert state["n_indices"] == 1
assert "X_m" in state
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', dbscan_model.keys())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
def test_dbscan_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, _, _ = make_dataset(datatype, nrows, ncols, n_info)
model = dbscan_model[keys]()
result["dbscan"] = model.fit_predict(X_train)
return model, X_train
def assert_model(pickled_model, X_train):
pickle_after_predict = pickled_model.fit_predict(X_train)
assert array_equal(result["dbscan"], pickle_after_predict)
pickle_save_load(tmpdir, create_mod, assert_model)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('keys', agglomerative_model.keys())
@pytest.mark.parametrize('data_size', [unit_param([500, 20, 10]),
stress_param([500000, 1000, 500])])
def test_agglomerative_pickle(tmpdir, datatype, keys, data_size):
result = {}
def create_mod():
nrows, ncols, n_info = data_size
X_train, _, _ = make_dataset(datatype, nrows, ncols, n_info)
model = agglomerative_model[keys]()
result["agglomerative"] = model.fit_predict(X_train)
return model, X_train
def assert_model(pickled_model, X_train):
pickle_after_predict = pickled_model.fit_predict(X_train)
assert array_equal(result["agglomerative"], pickle_after_predict)
pickle_save_load(tmpdir, create_mod, assert_model)
def test_tsne_pickle(tmpdir):
result = {}
def create_mod():
iris = load_iris()
        iris_selection = np.random.RandomState(42)
import numpy as np
def act(x, d=False):
import act
if d:
return act.sigmoid_D(x)
return act.sigmoid(x)
input_data = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 1, 1, 0],
[1, 1, 1, 0],
[1, 0, 1, 0],
])
output_data = np.array([[1, 1, 0, 1, 0, 1]]).T
weight = [
2 * np.random.random((4,5)) - 1,
    2 * np.random.random((5,5)) - 1,
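    # assumed final 5->1 layer (hypothetical; source truncated here) so the last
    # weight maps the hidden layer onto the single column of output_data
    2 * np.random.random((5, 1)) - 1,
]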
# Copyright (c) 2006, <NAME>
# Copyright (c) 2006-2009, The Regents of the University of California
# Copyright (c) 2021 PickNik Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for tf_transformations."""
import math
import random
import numpy
from tf_transformations import _AXES2TUPLE, clip_matrix, euler_from_quaternion
from tf_transformations import compose_matrix, concatenate_matrices
from tf_transformations import decompose_matrix, euler_from_matrix
from tf_transformations import euler_matrix, identity_matrix, inverse_matrix
from tf_transformations import is_same_transform, orthogonalization_matrix
from tf_transformations import projection_from_matrix, projection_matrix
from tf_transformations import quaternion_about_axis, quaternion_conjugate
from tf_transformations import quaternion_from_euler, quaternion_from_matrix
from tf_transformations import quaternion_inverse, quaternion_matrix
from tf_transformations import quaternion_multiply, quaternion_slerp
from tf_transformations import random_quaternion, random_rotation_matrix
from tf_transformations import random_vector, rotation_matrix
from tf_transformations import reflection_from_matrix, reflection_matrix
from tf_transformations import rotation_from_matrix, scale_from_matrix
from tf_transformations import scale_matrix, shear_from_matrix, shear_matrix
from tf_transformations import superimposition_matrix, translation_matrix
from tf_transformations import translation_from_matrix, unit_vector
from tf_transformations import vector_norm
def test_standard():
alpha, beta, gamma = 0.123, -1.234, 2.345
origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
Rx = rotation_matrix(alpha, xaxis)
Ry = rotation_matrix(beta, yaxis)
Rz = rotation_matrix(gamma, zaxis)
R = concatenate_matrices(Rx, Ry, Rz)
euler = euler_from_matrix(R, 'rxyz')
assert numpy.allclose([alpha, beta, gamma], euler)
Re = euler_matrix(alpha, beta, gamma, 'rxyz')
assert is_same_transform(R, Re)
al, be, ga = euler_from_matrix(Re, 'rxyz')
assert is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
qx = quaternion_about_axis(alpha, xaxis)
qy = quaternion_about_axis(beta, yaxis)
qz = quaternion_about_axis(gamma, zaxis)
q = quaternion_multiply(qx, qy)
q = quaternion_multiply(q, qz)
Rq = quaternion_matrix(q)
assert is_same_transform(R, Rq)
S = scale_matrix(1.23, origin)
T = translation_matrix((1, 2, 3))
Z = shear_matrix(beta, xaxis, origin, zaxis)
R = random_rotation_matrix(numpy.random.rand(3))
M = concatenate_matrices(T, R, Z, S)
scale, shear, angles, trans, persp = decompose_matrix(M)
assert numpy.allclose(scale, 1.23)
assert numpy.allclose(trans, (1, 2, 3))
assert numpy.allclose(shear, (0, math.tan(beta), 0))
assert is_same_transform(R, euler_matrix(axes='sxyz', *angles))
M1 = compose_matrix(scale, shear, angles, trans, persp)
assert is_same_transform(M, M1)
def test_identity_matrix():
m1 = identity_matrix()
assert numpy.allclose(m1, numpy.dot(m1, m1))
assert numpy.sum(m1) == 4.0
assert numpy.trace(m1) == 4.0
assert numpy.allclose(m1, numpy.identity(4, dtype=numpy.float64))
def test_translation_matrix():
v = numpy.random.random(3) - 0.5
assert numpy.allclose(v, translation_matrix(v)[:3, 3])
def test_translation_from_matrix():
v0 = numpy.random.random(3) - 0.5
v1 = translation_from_matrix(translation_matrix(v0))
assert numpy.allclose(v0, v1)
def test_reflection_matrix():
v0 = numpy.random.random(4) - 0.5
v0[3] = 1.0
v1 = numpy.random.random(3) - 0.5
R = reflection_matrix(v0, v1)
assert numpy.allclose(2., numpy.trace(R))
assert numpy.allclose(v0, numpy.dot(R, v0))
v2 = v0.copy()
v2[:3] += v1
v3 = v0.copy()
    v3[:3] -= v1
assert numpy.allclose(v2, numpy.dot(R, v3))
def test_reflection_from_matrix():
v0 = numpy.random.random(3) - 0.5
v1 = numpy.random.random(3) - 0.5
M0 = reflection_matrix(v0, v1)
point, normal = reflection_from_matrix(M0)
M1 = reflection_matrix(point, normal)
assert is_same_transform(M0, M1)
def test_rotation_matrix():
angle = (random.random() - 0.5) * (2*math.pi)
direc = numpy.random.random(3) - 0.5
point = numpy.random.random(3) - 0.5
R0 = rotation_matrix(angle, direc, point)
R1 = rotation_matrix(angle-2*math.pi, direc, point)
assert is_same_transform(R0, R1)
R0 = rotation_matrix(angle, direc, point)
R1 = rotation_matrix(-angle, -direc, point)
assert is_same_transform(R0, R1)
m1 = numpy.identity(4, numpy.float64)
assert numpy.allclose(m1, rotation_matrix(math.pi*2, direc))
m2 = numpy.trace(rotation_matrix(math.pi/2, direc, point))
assert numpy.allclose(2., m2)
def test_rotation_from_matrix():
angle = (random.random() - 0.5) * (2*math.pi)
direc = numpy.random.random(3) - 0.5
point = numpy.random.random(3) - 0.5
R0 = rotation_matrix(angle, direc, point)
angle, direc, point = rotation_from_matrix(R0)
R1 = rotation_matrix(angle, direc, point)
assert is_same_transform(R0, R1)
def test_scale_matrix():
v = (numpy.random.rand(4, 5) - 0.5) * 20.0
v[3] = 1.0
S = scale_matrix(-1.234)
assert numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
factor = random.random() * 10 - 5
origin = numpy.random.random(3) - 0.5
direct = numpy.random.random(3) - 0.5
S = scale_matrix(factor, origin)
S = scale_matrix(factor, origin, direct)
def test_scale_from_matrix():
factor = random.random() * 10 - 5
origin = numpy.random.random(3) - 0.5
direct = numpy.random.random(3) - 0.5
S0 = scale_matrix(factor, origin)
factor, origin, direction = scale_from_matrix(S0)
S1 = scale_matrix(factor, origin, direction)
assert is_same_transform(S0, S1)
S0 = scale_matrix(factor, origin, direct)
factor, origin, direction = scale_from_matrix(S0)
S1 = scale_matrix(factor, origin, direction)
assert is_same_transform(S0, S1)
def test_projection_matrix():
P = projection_matrix((0, 0, 0), (1, 0, 0))
assert numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
point = numpy.random.random(3) - 0.5
normal = numpy.random.random(3) - 0.5
# direct = numpy.random.random(3) - 0.5
persp = numpy.random.random(3) - 0.5
P0 = projection_matrix(point, normal)
# P1 = projection_matrix(point, normal, direction=direct)
P2 = projection_matrix(point, normal, perspective=persp)
P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
assert is_same_transform(P2, numpy.dot(P0, P3))
P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
v0[3] = 1.0
v1 = numpy.dot(P, v0)
assert numpy.allclose(v1[1], v0[1])
assert numpy.allclose(v1[0], 3.0-v1[1])
def test_projection_from_matrix():
point = numpy.random.random(3) - 0.5
normal = numpy.random.random(3) - 0.5
direct = numpy.random.random(3) - 0.5
persp = numpy.random.random(3) - 0.5
P0 = projection_matrix(point, normal)
result = projection_from_matrix(P0)
P1 = projection_matrix(*result)
assert is_same_transform(P0, P1)
P0 = projection_matrix(point, normal, direct)
result = projection_from_matrix(P0)
P1 = projection_matrix(*result)
assert is_same_transform(P0, P1)
P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
result = projection_from_matrix(P0, pseudo=False)
P1 = projection_matrix(*result)
assert is_same_transform(P0, P1)
P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
result = projection_from_matrix(P0, pseudo=True)
P1 = projection_matrix(*result)
assert is_same_transform(P0, P1)
def test_clip_matrix():
frustrum = numpy.random.rand(6)
frustrum[1] += frustrum[0]
frustrum[3] += frustrum[2]
frustrum[5] += frustrum[4]
M = clip_matrix(*frustrum, perspective=False)
assert numpy.allclose(
numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]),
[-1., -1., -1., 1.])
assert numpy.allclose(
numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]),
[1., 1., 1., 1.])
M = clip_matrix(*frustrum, perspective=True)
v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
assert numpy.allclose(v / v[3], [-1., -1., -1., 1.])
v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
assert numpy.allclose(v / v[3], [1., 1., -1., 1.])
def test_shear_matrix():
angle = (random.random() - 0.5) * 4*math.pi
direct = numpy.random.random(3) - 0.5
point = numpy.random.random(3) - 0.5
normal = numpy.cross(direct, numpy.random.random(3))
S = shear_matrix(angle, direct, point, normal)
assert numpy.allclose(1.0, numpy.linalg.det(S))
def test_shear_from_matrix():
angle = (random.random() - 0.5) * 4*math.pi
direct = numpy.random.random(3) - 0.5
point = numpy.random.random(3) - 0.5
normal = numpy.cross(direct, numpy.random.random(3))
S0 = shear_matrix(angle, direct, point, normal)
angle, direct, point, normal = shear_from_matrix(S0)
S1 = shear_matrix(angle, direct, point, normal)
assert is_same_transform(S0, S1)
def test_decompose_matrix():
T0 = translation_matrix((1, 2, 3))
scale, shear, angles, trans, persp = decompose_matrix(T0)
T1 = translation_matrix(trans)
assert numpy.allclose(T0, T1)
S = scale_matrix(0.123)
scale, shear, angles, trans, persp = decompose_matrix(S)
assert scale[0] == 0.123
R0 = euler_matrix(1, 2, 3)
scale, shear, angles, trans, persp = decompose_matrix(R0)
R1 = euler_matrix(*angles)
assert numpy.allclose(R0, R1)
def test_compose_matrix():
scale = numpy.random.random(3) - 0.5
shear = numpy.random.random(3) - 0.5
angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
trans = numpy.random.random(3) - 0.5
persp = numpy.random.random(4) - 0.5
M0 = compose_matrix(scale, shear, angles, trans, persp)
result = decompose_matrix(M0)
M1 = compose_matrix(*result)
assert is_same_transform(M0, M1)
def test_orthogonalization_matrix():
om = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
assert numpy.allclose(om[:3, :3], numpy.identity(3, float) * 10)
om = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
assert numpy.allclose(numpy.sum(om), 43.063229)
def test_superimposition_matrix():
v0 = numpy.random.rand(3, 10)
M = superimposition_matrix(v0, v0)
assert numpy.allclose(M, numpy.identity(4))
R = random_rotation_matrix(numpy.random.random(3))
v0 = ((1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 1))
v1 = numpy.dot(R, v0)
M = superimposition_matrix(v0, v1)
assert numpy.allclose(v1, numpy.dot(M, v0))
v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
v0[3] = 1.0
v1 = numpy.dot(R, v0)
M = superimposition_matrix(v0, v1)
assert numpy.allclose(v1, numpy.dot(M, v0))
S = scale_matrix(random.random())
    T = translation_matrix(numpy.random.random(3) - 0.5)
"""
=============================================================================
Eindhoven University of Technology
==============================================================================
Source Name : trainingUpdate_callback.py
                      Callback which displays the training graphs for the
                      training and validation sets at the end of every X epochs.
                      If a save directory is provided, the graphs are saved there.
Author : <NAME>
Date : 15/01/2019
Reference : <NAME>, <NAME>, and <NAME>,
"Deep probabilistic subsampling for task-adaptive compressed sensing", 2019
==============================================================================
"""
import keras
import numpy as np
import matplotlib.pyplot as plt
class training_callback(keras.callbacks.Callback):
def __init__(self, outputPerNepochs, outputLastNepochs,savedir,reconVSclassif):
self.outputPerNepochs = outputPerNepochs
self.outputLastNepochs = outputLastNepochs[0]
self.n_epochs = outputLastNepochs[1]
self.savedir = savedir
self.reconVSclassif = reconVSclassif
self.train_MSE_im = []
self.val_MSE_im = []
self.train_PSNR_im = []
self.val_PSNR_im = []
self.train_SSIM_im = []
self.val_SSIM_im = []
self.train_MSE_feat = []
self.val_MSE_feat = []
self.train_acc = []
self.val_acc = []
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
if self.reconVSclassif == 'recon':
self.train_MSE_im.append(logs.get('ImageOutput_mean_squared_error'))
self.val_MSE_im.append(logs.get('val_ImageOutput_mean_squared_error'))
self.train_PSNR_im.append(logs.get('ImageOutput_PSNR'))
self.val_PSNR_im.append(logs.get('val_ImageOutput_PSNR'))
self.train_SSIM_im.append(logs.get('ImageOutput_SSIM'))
self.val_SSIM_im.append(logs.get('val_ImageOutput_SSIM'))
self.train_MSE_feat.append(logs.get('FeatureOutput_mean_squared_error'))
self.val_MSE_feat.append(logs.get('val_FeatureOutput_mean_squared_error'))
else:
self.train_acc.append(logs.get('acc'))
self.val_acc.append(logs.get('val_acc'))
if (epoch+1) % self.outputPerNepochs == 0 or (epoch+1) > (self.n_epochs-self.outputLastNepochs):
if self.reconVSclassif == 'recon':
plt.figure(figsize=(10,10))
plt.gcf().clear()
plt.subplot(221)
plt.plot(np.arange(epoch+1),self.train_MSE_im)
plt.plot(np.arange(epoch+1),self.val_MSE_im)
plt.title('MSE - images')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(['Train','Val'], loc='upper right')
plt.grid()
plt.subplot(222)
plt.plot(np.arange(epoch+1),self.train_PSNR_im)
plt.plot(np.arange(epoch+1),self.val_PSNR_im)
plt.title('PSNR - images')
plt.xlabel('Epoch')
plt.ylabel('PSNR')
plt.legend(['Train','Val'], loc='lower right')
plt.grid()
plt.subplot(223)
plt.plot(np.arange(epoch+1),self.train_SSIM_im)
plt.plot(np.arange(epoch+1),self.val_SSIM_im)
plt.title('SSIM - images')
plt.xlabel('Epoch')
plt.ylabel('SSIM')
plt.legend(['Train','Val'], loc='lower right')
plt.grid()
plt.subplot(224)
plt.plot(np.arange(epoch+1),self.train_MSE_feat)
plt.plot(np.arange(epoch+1),self.val_MSE_feat)
plt.title('MSE - features')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(['Train','Val'], loc='upper right')
plt.grid()
else:
plt.figure()
                plt.plot(np.arange(epoch+1),self.train_acc)
import csv
import os
import sys
from datetime import datetime, timedelta
from functools import wraps
import numpy as np
if os.getenv("FLEE_TYPE_CHECK") is not None and os.environ["FLEE_TYPE_CHECK"].lower() == "true":
from beartype import beartype as check_args_type
else:
def check_args_type(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@check_args_type
def subtract_dates(date1: str, date2: str) -> int:
"""
    Takes two dates in %Y-%m-%d format. Returns date1 - date2, measured in days.
Args:
date1 (str): Description
date2 (str): Description
Returns:
int: Description
"""
date_format = "%Y-%m-%d"
a = datetime.strptime(date1, date_format)
b = datetime.strptime(date2, date_format)
delta = a - b
# print(date1,"-",date2,"=",delta.days)
return delta.days
@check_args_type
def steps_to_date(steps: int, start_date: str):
"""
Summary
Args:
steps (int): Description
start_date (str): Description
Returns:
TYPE: Description
"""
# date_format = "%Y-%m-%d"
date_1 = datetime.strptime(start_date, "%Y-%m-%d")
new_date = (date_1 + timedelta(days=steps)).date()
return new_date
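# Hedged doctest-style examples for the two date helpers above (values checked
# by hand; not part of the original module):
#   >>> subtract_dates("2010-01-10", "2010-01-01")
#   9
#   >>> steps_to_date(9, "2010-01-01")
#   datetime.date(2010, 1, 10)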
@check_args_type
def _processEntry(
row: list,
table: np.ndarray,
data_type: str,
date_column: int,
count_column: int,
start_date: str,
population_scaledown_factor: int = 1,
) -> np.ndarray:
"""
Code to process a population count from a CSV file.
column <date_column> contains the corresponding date in %Y-%m-%d format.
column <count_column> contains the population size on that date.
Args:
row (list): Description
table (np.ndarray): Description
data_type (str): Description
date_column (int): Description
count_column (int): Description
start_date (str): Description
population_scaledown_factor (int, optional): Description
Returns:
np.ndarray: Description
"""
if len(row) < 2:
return table
if row[0][0] == "#":
return table
if row[1] == "":
return table
# Make sure the date column becomes an integer, which contains the offset
# in days relative to the start date.
row[date_column] = subtract_dates(date1=row[date_column], date2=start_date)
if data_type == "int":
table = np.vstack(
[table, [int(row[date_column]), int(row[count_column]) / population_scaledown_factor]]
)
else:
table = np.vstack(
[
table,
[
float(row[date_column]),
float(row[count_column]) / float(population_scaledown_factor),
],
]
)
return table
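# Worked example for _processEntry (hedged, not from the original source): with
# start_date="2010-01-01", date_column=0, count_column=1 and data_type="int",
# the row ["2010-01-03", "40"] appends [2, 40 / population_scaledown_factor],
# i.e. a day offset of 2 and the scaled population count.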
@check_args_type
def AddCSVTables(table1: np.ndarray, table2: np.ndarray) -> np.ndarray:
"""
Add two time series tables. This version does not yet support interpolation between values.
(The UNHCR data website also does not do this, by the way)
Args:
table1 (np.ndarray): Description
table2 (np.ndarray): Description
Returns:
np.ndarray: Description
"""
table = np.zeros([0, 2])
offset = 0
    last_c2 = np.zeros(2)  # most recent row of table2 (zeros until one is consumed)
for c2 in table2:
# If table 2 date value is higher, then keep adding entries from table
# 1
while c2[0] > table1[offset][0]:
table = np.vstack([table, [table1[offset][0], last_c2[1] + table1[offset][1]]])
if offset < len(table1) - 1:
offset += 1
else:
break
# If the two match, add a total.
if c2[0] == table1[offset][0]:
            table = np.vstack([table, [c2[0], c2[1] + table1[offset][1]]])
import nashpy as nash
import numpy as np
from lmm import approximate_equilibrium, normalize
def test1():
"""
    Tests against the classic rock-paper-scissors example. It can easily be
    shown that the best mixed strategy is to choose each option uniformly at
    random, which yields an expected payout of 0 for both players.
"""
print("Test 1")
# Use existing library to benchmark results on RPS
row_payoff = normalize(np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]]))
col_payoff = row_payoff.T
rps = nash.Game(row_payoff)
# Calculate my approximation
epsilon = 0.5
row_strat, col_strat = approximate_equilibrium(row_payoff, col_payoff, epsilon, max_k=None)
# What is our expected reward?
reward = rps[row_strat, col_strat]
print("Approx.:", reward, row_strat, col_strat)
# What is the true Nash equilibria reward? We are close to one of them
for row_opt, col_opt in list(rps.support_enumeration()):
opt_reward = rps[row_opt, col_opt]
print("Exact: ", opt_reward, row_opt, col_opt)
if np.all(np.abs(reward - opt_reward) <= epsilon):
return
# Uh oh! We were close to none of them
assert False
def test2():
"""
    Tests against a simple 2x2 game with row payoff matrix [[3, 0], [5, 1]],
    in which the second row strictly dominates the first.
"""
print()
print("Test 2")
    # Use existing library to benchmark results on this 2x2 game
    row_payoff = normalize(np.array([[3, 0], [5, 1]]))
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'data'},
'node_2': {'type': 'Identity', 'value': None, 'kind': 'data'},
'pbc': {'type': 'PriorBoxClustered', 'value': None, 'kind': 'op'},
'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'},
'op_output': { 'kind': 'op', 'op': 'Result'}
}
class TestPriorBoxClusteredPartialInfer(unittest.TestCase):
def test_caffe_priorboxclustered_infer(self):
graph = build_graph(nodes_attributes,
[
('node_1', 'pbc'),
('node_2', 'pbc'),
('pbc', 'node_3'),
('node_3', 'op_output')
],
{
'node_3': {'shape': None},
                                 'node_1': {'shape': np.array([1, 384, 19, 19])},
import platform
import numpy as np
import pytest
from sweeps import bayes_search as bayes
def squiggle(x):
return np.exp(-((x - 2) ** 2)) + np.exp(-((x - 6) ** 2) / 10) + 1 / (x ** 2 + 1)
def rosenbrock(x):
    return np.sum((x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)
"""
Script for MCS+
Reliable Query Response
"""
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import sys
import os
from my_community import mycommunity
from multi_arm_bandit import bandit
import networkx as nx
import community
import csv
import numpy as np
import random
import pickle
import operator
import traceback
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.linear_model import LinearRegression
import time
import multiprocessing
from multiprocessing import Pool
ctheta = 2
def getApproxPartition(graph, nodes=None, single=True):
if graph.number_of_edges() == 0:
return {u:u for u in graph.nodes()}, 0
# Replace with other community detection algorithm if desired
part = community.best_partition(graph)
mod = community.modularity(part, graph)
return part, mod
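# Hedged usage sketch (relies on the networkx and community imports above):
#   part, mod = getApproxPartition(nx.karate_club_graph())
#   'part' maps each node to a community id; 'mod' is the Louvain modularity.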
class LayerImportance(object):
"""Class for handling layer importance realated methods"""
def __init__(self, layer_count):
super(LayerImportance, self).__init__()
self._overlap = [[0.0] for _ in range(0,layer_count)] # importances of layers
self._freshness = [[1.0] for _ in range(0, layer_count)] # Amount of new edges found in previous round
def _updateBaseGraph(self, graph, nodes=None):
"""
The graph against which importance will be calculated
"""
        self._base_graph = graph  # graph on which importance calculations will be based
if nodes is not None:
self._base_graph = self._base_graph.subgraph(nodes)
self._base_nodes = set(list(self._base_graph.nodes()))
self._base_edges = set([frozenset(e) for e in self._base_graph.edges()])
def _edgeOverlap(self, graph):
"""
Fraction of edges in graph that are also in base graph
If nodes is None, all the nodes in graph are considered.
Otherwise only subgraph containing nodes is considered.
"""
sg = graph.subgraph(self._base_nodes)
if sg.number_of_edges() == 0:
            # If there are no edges in the subgraph, the overlap is zero
return 0.0
edges = set([frozenset(e) for e in sg.edges()])
return len(self._base_edges.intersection(edges))/len(edges)
def _randomEdgeOverLap(self, graph):
"""
Expected fraction of overlap if graph were random
"""
sg = graph.subgraph(self._base_nodes)
if sg.number_of_edges() == 0:
            # If there are no edges in the subgraph, return neutral defaults (mu=0.0, var=1.0)
return 0.0, 1.0
# Edge probality for random graph based on graph
ep = 2 * sg.number_of_edges() / (sg.number_of_nodes())**2
# Number of edges between nodes in base graph
base_edge_count = self._base_graph.subgraph(sg.nodes()).number_of_edges()
# Expected number of overlap edge
mu = base_edge_count * ep
var = np.sqrt(base_edge_count * ep * (1.0 - mu)**2)
# Overlap edges as fraction of all edges in sg
#print(mu, var)
return mu, var
def _computeOverlap(self, graph):
"""
Compute the relative layer importance
"""
val = self._edgeOverlap(graph)
mu, var = self._randomEdgeOverLap(graph)
if var == 0:
i = 0.0
else:
i = np.abs((val - mu)/var)
return max(i, 0.0)
def updateLayerOverlap(self, graphs, nodes=None):
"""
Update the importance of all layers in graphs, and nodes
"""
self._updateBaseGraph(graphs[0], nodes)
for i in range(0, len(graphs)):
overlap = self._computeOverlap(graphs[i])
if overlap is not False:
self._overlap[i].append(overlap)
def updateLayerFreshness(self, i, val):
self._freshness[i].append(val)
def getLayerFreshness(self, layer=None):
# Freshness of the last 5 rounds
if layer is not None:
return np.mean(self._freshness[layer][-3:])
else:
return[np.mean(f[-3:]) for f in self._freshness]
def getLayerOverlap(self, layer=None):
if layer is not None:
return self._overlap[layer][-1]
else:
return [i[-1] for i in self._overlap]
class Budget(object):
"""docstring for Budget"""
def __init__(self, max_budget, layer_costs, layer_importance):
super(Budget, self).__init__()
self._budget_max = max_budget
self._budget_left = max_budget
self._budget_consumed = 0
self._layer_costs = layer_costs
self._slices = 10 # Initial number of slices
self._slices_last_update = 0 # The budget consumed when slice was last updated
self._layer_importance = layer_importance
def initializeBudget(self):
"""
Allocate 10% of max budget to first slice
Allocate enough budget such that same number of queries can
be made in each layer
"""
budget = self._budget_left/self._slices
total_cost = sum(self._layer_costs)
allocated = []
for c in self._layer_costs:
allocated.append(budget * c / total_cost)
return allocated
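    # Worked example (hedged): with _budget_left=100, _slices=10 and
    # _layer_costs=[1, 3], budget=10 and allocated=[2.5, 7.5], so both layers
    # can afford the same number (2.5) of queries.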
def consumeBudget(self, cost):
self._budget_consumed += cost
self._budget_left -= cost
def updateSlices(self):
"""
Update number of slices based on cost consumed since last update
"""
if self._budget_consumed == self._slices_last_update:
return True
cost = self._budget_consumed - self._slices_last_update
self._slices = min(self._slices, np.ceil(self._budget_left / cost))
self._slices = max(self._slices, 1)
self._slices_last_update = self._budget_consumed
def allocateBudget(self):
"""
Allocate the budget based on weights
Layers with high weight gets more budget
Budget for layer 0 depends only on layer cost
"""
budget = self._budget_left/self._slices
allocation = []
# Budget for layer 0
b0 = budget * self._layer_costs[0] / np.sum(self._layer_costs)
allocation.append(b0)
n0 = b0 / self._layer_costs[0]
# Remainig budget
budget -= b0
# Total weights excluding layer 0
eta = 0.000000001
weights = [self._layer_importance.getLayerOverlap(l) * self._layer_importance.getLayerFreshness(l) for l in range(1, len(self._layer_costs))]
total_weight = np.sum(weights) + eta * len(weights)
for i in range(0, len(weights)):
b = budget * (weights[i] + eta) / total_weight
b = min(b, n0 * self._layer_costs[i+1])
allocation.append(b)
        # Make sure each layer gets enough for at least one query
allocation = [max(allocation[i], self._layer_costs[i]) for i in range(0, len(allocation))]
return allocation
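    # Worked example (hedged): with a per-slice budget of 30 and
    # _layer_costs=[1, 2], layer 0 gets b0 = 30*1/3 = 10 (n0 = 10 queries);
    # the remaining 20 goes to layer 1, capped at n0 * cost = 20, giving
    # allocation = [10, 20], i.e. 10 affordable queries per layer.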
def getBudgetLeft(self):
return self._budget_left
def getBudgetConsumed(self):
return self._budget_consumed
class Evaluation(object):
"""docstring for Evaluation"""
def __init__(self, graphs, partition=None):
super(Evaluation, self).__init__()
self._graphs = graphs
# Partitions and communties of full layer 0
if partition is None:
self._partition = self._getPartition(self._graphs[0])
else:
self._partition = partition
self._community = self._getCommunity(self._partition)
self._partition = self._communityToPartition(self._community)
self._cweights = {i:len(self._community[i])/len(self._partition)\
                           for i in self._community}  # Relative size of the communities
def _getPartition(self, graph):
return community.best_partition(graph, randomize=False)
def _communityToPartition(self, com):
part = {}
for c in com:
for u in com[c]:
part[u] = c
return part
def _getCommunity(self, partition):
com = {}
for n in partition:
p = partition[n]
if p not in com:
com[p] = set()
com[p].update(set([n]))
#com = {c:com[c] for c in com if len(com[c]) > 1}
# Make sure we do not consider the singleton nodes
return com
def _communityQuality(self, x0, x1):
return normalized_mutual_info_score(x0, x1)
def _communityRepresentation(self, com):
m0, m1, s0, s1, eta = 0, 0, 0, 0, 0
com0 = list(self._community.values())
com0 = sorted(com0, key=len, reverse=True)
com1 = list(com.values())
com1 = sorted(com1, key=len, reverse=False)
for i in range(0, len(com0)):
max_sim = 0
max_com = None
for j in range(0, len(com1)):
sim = len(com1[j].intersection(com0[i]))
if sim > max_sim:
max_sim = sim
max_com = j
if max_com is not None:
#com1.pop(max_com)
m0 += np.log10(len(com0[i]) + eta)
#m0 += 1
#break
"""
for i in range(0, len(com1)):
#max_sim = 0
#max_com = None
for j in range(0, len(com0)):
sim = len(com0[j].intersection(com1[i]))
#if sim > max_sim:
# max_sim = sim
# max_com = j
if sim > 0:
m1 += np.log10(len(com1[i]) + eta)
break
"""
#c0 = len(com0)
#print([np.log10(len(c) + eta) for c in com1])
c0 = np.sum([np.log10(len(c) + eta) for c in com0])
#c1 = np.sum([np.log10(len(c) + eta) for c in com1])
if c0 == 0:
return 0.0
return m0 / c0
        # NOTE: unreachable leftover from an earlier scoring variant; 'c1'/'m1'
        # are not defined on this path, so it is kept commented out for reference.
        # s0 = m0 / c0
        # s1 = m1 / c1
        # cr = 2 * s0 * s1 / (s0 + s1)
        # return s0
def communitySimilarity(self, graph, nodes=None):
if graph.number_of_edges() == 0:
return [0,0,0]
part, _ = getApproxPartition(graph, nodes)
#nodes = graph.nodes()
"""
if nodes is None:
# #part = self._getPartition(graph)
part, _ = getApproxPartition(graph)
else:
sg = graph.subgraph(nodes)
if sg.number_of_edges() == 0:
return [0,0,0]
#part = self._getPartition(sg)
part, _ = getApproxPartition(sg)
"""
# Common nodes to perform comparison
part = {u:part[u] for u in nodes}
nodes = set(part.keys()).intersection(self._partition.keys())
#nodes = nodes.intersection(nodes0)
#if nodes is not None and len(nodes) > 0:
#part = {u:part[u] for u in part if u in nodes}
#el
# return 0.0
com = self._getCommunity(part)
x0 = [self._partition[u] for u in nodes]
x1 = [part[u] for u in nodes]
#print(x0, x1)
q = self._communityQuality(x0, x1)
r = self._communityRepresentation(com)
#print(q,r)
if r + q == 0:
return [0,0,0]
return [2 * q * r / (q + r), q, r]
def partitionDistance(self, part1, part2, nodes=None):
"""
        Compute the partition distance between communities c1 and c2
"""
c1 = self._getCommunity(part1)
c2 = self._getCommunity(part2)
if nodes is None:
n1 = set([])
n2 = set([])
for c in c1:
n1.update(c1[c])
for c in c2:
n2.update(c2[c])
nodes = n1.intersection(n2)
c1 = {c:c1[c].intersection(nodes) for c in c1}
c2 = {c:c2[c].intersection(nodes) for c in c2}
m = max(len(c1), len(c2))
m = range(0,m)
mat = {i: {j: 0 for j in c2} for i in c1}
total = 0
for i in c1:
for j in c2:
if i in c1 and j in c2:
mat[i][j] = len(c1[i].intersection(c2[j]))
total += mat[i][j]
if total <= 1:
return 1.0
assignment = []
        rows = list(c1.keys())
        cols = list(c2.keys())
while len(rows) > 0 and len(cols) > 0:
mval = 0
r = -1
c = -1
for i in rows:
for j in cols:
if mat[i][j] >= mval:
mval = mat[i][j]
r = i
c = j
rows.remove(r)
cols.remove(c)
assignment.append(mval)
dist = total - np.sum(assignment)
if np.isnan(dist/total):
return 0
return dist/total
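    # Worked example (hedged): part1 = {'a': 0, 'b': 0, 'c': 1} versus
    # part2 = {'a': 0, 'b': 1, 'c': 1} yields total overlap 3 and a greedy
    # assignment of size 2, so the distance is (3 - 2) / 3 = 1/3; identical
    # partitions yield 0.0.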
class NodeSelection(object):
"""docstring for NodeSelection"""
def __init__(self, sample):
super(NodeSelection, self).__init__()
self._sample = sample
self._model = None
self._tdata = {'X':[], 'Y':[]}
self._alpha = 0.1 # probability of selecting random node
self._lfeatures = None
def _getFeatures(self, candidates):
degree = nx.degree_centrality(self._sample)
betweeness = nx.betweenness_centrality(self._sample, k=min(10, self._sample.number_of_nodes()))
core = nx.core_number(self._sample)
# Normalize all features between 0 and 1
min_degree, max_degree = min(degree.values()), max(degree.values())
min_betweeness, max_betweeness = min(betweeness.values()), max(betweeness.values())
min_core, max_core = min(core.values()), max(core.values())
vdegree = {u:0 for u in candidates}
vbetweeness = {u:0 for u in candidates}
vcore = {u:0 for u in candidates}
if min_degree < max_degree:
vdegree.update({u: (degree[u] - min_degree)/(max_degree - min_degree) for u in degree})
if min_betweeness < max_betweeness:
vbetweeness.update({u: (betweeness[u] - min_betweeness)/(max_betweeness - min_betweeness) for u in betweeness})
if min_core < max_core:
vcore.update({u: (core[u] - min_core)/(max_core - min_core) for u in core})
features = [[vdegree[u], vbetweeness[u], vcore[u]] for u in candidates]
return features
def nextNode(self, candidates):
if len(candidates) == 0:
self._lfeatures = None
return False
candidates = list(candidates)
features = self._getFeatures(candidates)
if np.random.random() < self._alpha or self._model is None or len(self._tdata['X']) < 5:
m_index = np.random.choice(len(candidates))
else:
Y = self._model.predict(features)
m_index, m_val = -1, -10000
for i in range(0, len(Y)):
if Y[i] > m_val:
m_val = Y[i]
m_index = i
self._lfeatures = features[m_index]
return [candidates[m_index]]
def update(self, y, sample):
self._sample = sample
if self._lfeatures is not None:
self._tdata['X'].append(self._lfeatures)
self._tdata['Y'].append(y)
self._model = LinearRegression().fit(self._tdata['X'], self._tdata['Y'])
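    # Hedged usage sketch (epsilon-greedy pick guided by a learned linear
    # value model; surrounding names assumed):
    #   ns = NodeSelection(sample_graph)
    #   picked = ns.nextNode(candidate_nodes)    # [node] or False
    #   ns.update(observed_reward, sample_graph) # feed the reward back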
class RNDSample(object):
"""docstring for RNDSample"""
def __init__(self, graph, sample, layer_costs, queried, budget, layer_importance):
super(RNDSample, self).__init__()
self._sample = sample
self._graph = graph
self._layer_costs = layer_costs
self._queried = queried
self._unqueried = [set([]) for _ in self._sample]
self._alpha = 0.1 # reset prob for random walk
self._budget = budget
self._layer_importance = layer_importance
self._initializeSample()
def _initializeSample(self):
"""
Initialize sample by adding some random nodes to samples
"""
nodes = sorted(list(self._graph[0].nodes()))[:10]
for i in range(0, len(self._sample)):
self._sample[i].add_nodes_from(nodes)
self._unqueried[i].update(nodes)
def sample(self, budget):
"""
Sample graph with random walk
"""
for i in range(0, len(self._sample)):
self._unqueried[i].difference_update(self._queried[i])
if len(self._unqueried[i]) > 0:
u = np.random.choice(list(self._unqueried[i]))
else:
l = np.random.choice(range(0, len(self._unqueried)))
if len(self._unqueried[l]) > 0:
u = np.random.choice(list(self._unqueried[l]))
else:
u = None
c = 0
edges0 = set([frozenset(e) for e in self._sample[i].edges()])
while c <= budget[i] and u is not None and self._budget.getBudgetLeft() > 0:
c += self._layer_costs[i]
self._budget.consumeBudget(self._layer_costs[i])
try:
neighbors = set(list(self._graph[i].neighbors(u)))
edges = [(u,v) for v in neighbors]
self._sample[i].add_edges_from(edges)
except:
neighbors = []
self._queried[i].update([u])
self._unqueried[i].update(neighbors)
self._unqueried[i].difference_update(self._queried[i])
# If no unqueried node, stop
if len(self._unqueried[i]) == 0:
break
candidates = set(neighbors).difference(self._queried[i])
if np.random.random_sample() > self._alpha and len(candidates) > 0:
u = np.random.choice(list(candidates))
elif len(self._unqueried[i]) > 0:
u = np.random.choice(list(self._unqueried[i]))
else:
break
# Update layer importance
freshness = 0
if self._sample[i].number_of_edges() > 0:
edges1 = set([frozenset(e) for e in self._sample[i].edges()])
freshness = len(edges1.difference(edges0)) / len(edges1)
self._layer_importance.updateLayerFreshness(i, freshness)
class CommunityManager(object):
"""docstring for CBanditManager"""
def __init__(self, hcommunity):
super(CommunityManager, self).__init__()
self._hcommunity = hcommunity
self._initalCommunities()
self._generateMapping()
def _generateMapping(self):
"""
Map int to com ids
"""
        self._map = []
        self._rmap = []
        for l in range(0, self._hcommunity.getLayerCount()):
            c = self._hcommunity.getCommunityIds(l)
            self._map.append({i: c[i] for i in range(0, len(c))})
            self._rmap.append({c[i]: i for i in range(0, len(c))})
def _getComName(self, layer, i):
"""
Return com name given layer and id
"""
return self._map[layer][i]
def _initalCommunities(self):
"""
The two initial communities for all layers
"""
roots = self._hcommunity.getRootCommunity()
self._active_communities = []
self._rewards = []
self._crewards = []
for l in range(0, self._hcommunity.getLayerCount()):
coms = self._hcommunity.getChildren(l, roots[l])
self._active_communities.append(coms)
self._crewards.append({c:[] for c in coms})
def getActiveCommunities(self, layer):
return self._active_communities[layer]
def updateCReward(self, layer, cid, value):
#cid = self._map[layer][cid]
self._rewards.append(value)
self._crewards[layer][cid].append(value)
def switchArm(self, layer):
"""
        Check rewards to decide whether the active communities need to be changed
"""
        if np.any(np.array([len(self._crewards[layer][l]) for l in self._crewards[layer]]) < 5):
return False
rewards = self._crewards[layer]
cid = self._active_communities[layer]
aval = np.mean(self._rewards)
        astd = np.std(self._rewards)
# -*- coding: utf-8 -*-
import numpy as np
import os
import sys
import cvxpy as cp
import random
import tkinter
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn_lvq import GlvqModel, GmlvqModel
from plausible_counterfactuals import LvqCounterfactual, MatrixLvqCounterfactual, FeasibleCounterfactualOfDecisionTree, FeasibleCounterfactualSoftmax, HighDensityEllipsoids
from utils import compare_cf, perturb, load_data_breast_cancer, load_data_digits, load_data_wine, ClosestSample  # ClosestSample is assumed to live in utils alongside the other helpers
n_kf_splits = 4
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: <dataset_desc> <model_desc>")
else:
datasetdesc = sys.argv[1]
modeldesc = sys.argv[2]
n_prototypes = 3
# Load data
if datasetdesc == "wine":
X, y = load_data_wine();pca_dim = None
elif datasetdesc == "breastcancer":
X, y = load_data_breast_cancer();pca_dim = 5
elif datasetdesc == "digits":
X, y = load_data_digits();pca_dim = 40
X, y = shuffle(X, y, random_state=42)
labels = np.unique(y)
# Global results for plots
dist_perturbed_with_density_constraint = []
dist_perturbed_without_density_constraint = []
# Perturbations
        n_features = X.shape[1]  # Mask from none up to half of all features (with Gaussian noise)
masked_features = [None]
masked_features += list(range(1, int(n_features / 2) + 1))
for feature_mask in masked_features:
# Results
scores_with_density_constraint = []
scores_without_density_constraint = []
scores_perturbed_with_density_constraint = []
scores_perturbed_without_density_constraint = []
distances_with_density_constraint = []
distances_without_density_constraint = []
distances_perturbed_with_density_constraint = []
distances_perturbed_without_density_constraint = []
original_data = []
original_data_labels = []
cfs_with_density_constraint = []
cfs_without_density_constraint = []
cfs_perturbed_with_density_constraint = []
cfs_perturbed_without_density_constraint = []
cfs_target_label = []
scores_cf_perturbation_dist = []
scores_cf_feasible_perturbation_dist = []
results = {'notFound': 0, 'found': 0}
n_wrong_classification = 0
kf = KFold(n_splits=n_kf_splits)
for train_index, test_index in kf.split(X):
# Split data into training and test set
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# If requested: Reduce dimensionality
X_train_orig = np.copy(X_train)
X_test_orig = np.copy(X_test)
projection_matrix = None
projection_mean_sub = None
pca = None
if pca_dim is not None:
pca = PCA(n_components=pca_dim)
pca.fit(X_train)
projection_matrix = pca.components_ # Projection matrix
projection_mean_sub = pca.mean_
#print(projection_matrix)
X_train = np.dot(X_train - projection_mean_sub, projection_matrix.T)
X_test = np.dot(X_test - projection_mean_sub, projection_matrix.T)
# Fit classifier
model = None
if modeldesc == "glvq":
model = GlvqModel(prototypes_per_class=n_prototypes, random_state=4242)
elif modeldesc == "gmlvq":
model = GmlvqModel(prototypes_per_class=n_prototypes, random_state=4242)
elif modeldesc == "logreg":
model = LogisticRegression(multi_class='multinomial')
elif modeldesc == "dectree":
model = DecisionTreeClassifier(max_depth=7, random_state=42)
model.fit(X_train, y_train)
# Compute accuracy on test set
y_pred = model.predict(X_test)
print(f"F1-score: {f1_score(y_test, y_pred, average='weighted')}")
# Fit model for finding closest samples
closest_samples = ClosestSample(X_train_orig, y_train)
# For each class, fit density estimators
density_estimators = {}
kernel_density_estimators = {}
labels = np.unique(y)
for label in labels:
# Get all samples with the 'correct' label
idx = y_train == label
X_ = X_train[idx, :]
# Optimize hyperparameters
cv = GridSearchCV(estimator=KernelDensity(), iid=False, param_grid={'bandwidth': np.arange(0.1, 10.0, 0.05)}, n_jobs=-1, cv=5)
cv.fit(X_)
bandwidth = cv.best_params_["bandwidth"]
print("bandwidth: {0}".format(bandwidth))
cv = GridSearchCV(estimator=GaussianMixture(covariance_type='full'), iid=False, param_grid={'n_components': range(2, 10)}, n_jobs=-1, cv=5)
cv.fit(X_)
n_components = cv.best_params_["n_components"]
print("n_components: {0}".format(n_components))
# Build density estimators
kde = KernelDensity(bandwidth=bandwidth)
kde.fit(X_)
de = GaussianMixture(n_components=n_components, covariance_type='full')
de.fit(X_)
density_estimators[label] = de
kernel_density_estimators[label] = kde
# For each point in the test set, compute a closest and a plausible counterfactual
n_test = X_test.shape[0]
for i in range(n_test):
x_orig = X_test[i,:]
x_orig_orig = X_test_orig[i,:]
y_orig = y_test[i]
y_target = random.choice(list(filter(lambda l: l != y_test[i], labels)))
if(model.predict([x_orig]) == y_target): # Model already predicts target label!
continue
                    if(model.predict([x_orig]) != y_orig): # Data point is misclassified
                        print("Original sample is misclassified")
continue
# Compute counterfactual WITH kernel density constraints
idx = y_train == y_target
X_ = X_train[idx, :]
# Build density estimator
de = density_estimators[y_target]
kde = kernel_density_estimators[y_target]
from scipy.stats import multivariate_normal
densities_training_samples = []
densities_training_samples_ex = []
for j in range(X_.shape[0]):
x = X_[j,:]
z = []
dim = x.shape[0]
for i in range(de.weights_.shape[0]):
x_i = de.means_[i]
w_i = de.weights_[i]
cov = de.covariances_[i]
cov = np.linalg.inv(cov)
b = -2.*np.log(w_i) + dim*np.log(2.*np.pi) - np.log(np.linalg.det(cov))
z.append(np.dot(x - x_i, np.dot(cov, x - x_i)) + b) # NLL
densities_training_samples.append(np.min(z))
densities_training_samples_ex.append(z)
densities_training_samples = np.array(densities_training_samples)
densities_training_samples_ex = np.array(densities_training_samples_ex)
# Compute soft cluster assignments
cluster_prob_ = de.predict_proba(X_)
X_densities = de.score_samples(X_)
density_threshold = np.median(densities_training_samples)
r = HighDensityEllipsoids(X_, densities_training_samples_ex, cluster_prob_, de.means_, de.covariances_, density_threshold).compute_ellipsoids()
# Compute counterfactual
cf = None
if modeldesc == "glvq":
cf = LvqCounterfactual(model, X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)
elif modeldesc == "gmlvq":
cf = MatrixLvqCounterfactual(model, X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)
elif modeldesc == "logreg":
cf = FeasibleCounterfactualSoftmax(model.coef_, model.intercept_, X=X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub)
elif modeldesc == "dectree":
cf = FeasibleCounterfactualOfDecisionTree(model, X=X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub)
xcf = cf.compute_counterfactual(x_orig_orig, y_target=y_target, use_density_constraints=False)
if xcf is None:
results["notFound"] += 1
continue
# Compute counterfactual of perturbed sample
x_perturb = perturb(x_orig_orig) # Perturb original data point
x_perturb_t = pca.transform([x_perturb]) if pca is not None else [x_perturb]
if model.predict(x_perturb_t) != y_orig:
print("Perturbed sample is missclassified")
x_perturbed_cf = cf.compute_counterfactual(x_perturb, y_target=y_target, use_density_constraints=False)
if x_perturbed_cf is None:
results["notFound"] += 1
continue
                    # Compute a plausible counterfactual
cf2 = None
if modeldesc == "glvq":
cf2 = LvqCounterfactual(model, X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)
elif modeldesc == "gmlvq":
cf2 = MatrixLvqCounterfactual(model, X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)
elif modeldesc == "logreg":
cf2 = FeasibleCounterfactualSoftmax(model.coef_, model.intercept_, X=X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)
elif modeldesc == "dectree":
cf2 = FeasibleCounterfactualOfDecisionTree(model, X=X_, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)
xcf2 = cf2.compute_counterfactual(x_orig_orig, y_target=y_target, use_density_constraints=True)
if xcf2 is None:
results["notFound"] += 1
continue
# Compute plausible counterfactual of perturbed sample
x_perturbed_cf2 = cf2.compute_counterfactual(x_perturb, y_target=y_target, use_density_constraints=True)
if x_perturbed_cf2 is None:
results["notFound"] += 1
continue
results["found"] += 1
# Evaluate & store results
original_data.append(x_orig_orig)
original_data_labels.append(y_orig)
cfs_with_density_constraint.append(xcf2)
cfs_without_density_constraint.append(xcf)
cfs_perturbed_with_density_constraint.append(x_perturbed_cf2)
cfs_perturbed_without_density_constraint.append(x_perturbed_cf)
cfs_target_label.append(y_target)
distances_with_density_constraint.append(np.sum(np.abs(x_orig_orig - xcf2))) # Store distance before projecting it again for density estimation!
distances_without_density_constraint.append(np.sum(np.abs(x_orig_orig - xcf)))
distances_perturbed_with_density_constraint.append(np.sum(np.abs(x_perturb - x_perturbed_cf2)))
distances_perturbed_without_density_constraint.append(np.sum(np.abs(x_perturb - x_perturbed_cf)))
                    cf_perturbation_dist = compare_cf(xcf, x_perturbed_cf) # Distance between the counterfactuals of the perturbed and original samples
cf_feasible_perturbation_dist = compare_cf(xcf2, x_perturbed_cf2)
scores_cf_perturbation_dist.append(cf_perturbation_dist)
scores_cf_feasible_perturbation_dist.append(cf_feasible_perturbation_dist)
if pca is not None:
xcf = pca.transform([xcf])
xcf2 = pca.transform([xcf2])
x_perturbed_cf = pca.transform([x_perturbed_cf])
x_perturbed_cf2 = pca.transform([x_perturbed_cf2])
scores_without_density_constraint.append(kde.score_samples(xcf.reshape(1, -1)))
scores_with_density_constraint.append(kde.score_samples(xcf2.reshape(1, -1)))
                    scores_perturbed_without_density_constraint.append(kde.score_samples(x_perturbed_cf.reshape(1, -1)))
                    scores_perturbed_with_density_constraint.append(kde.score_samples(x_perturbed_cf2.reshape(1, -1)))
if feature_mask is not None:
dist_perturbed_with_density_constraint.append(np.median(scores_cf_feasible_perturbation_dist))
dist_perturbed_without_density_constraint.append(np.median(scores_cf_perturbation_dist))
print(f"Feature mask: {feature_mask}")
print(f"Not found {results['notFound']}/{results['notFound'] + results['found']}")
print("Without density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(scores_cf_perturbation_dist), np.mean(scores_cf_perturbation_dist), np.var(scores_cf_perturbation_dist)))
print("With density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(scores_cf_feasible_perturbation_dist), np.mean(scores_cf_feasible_perturbation_dist), np.var(scores_cf_feasible_perturbation_dist)))
print("Unperturbed")
print("Without density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(scores_without_density_constraint), np.mean(scores_without_density_constraint), np.var(scores_without_density_constraint)))
print("With density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(scores_with_density_constraint), np.mean(scores_with_density_constraint), np.var(scores_with_density_constraint)))
print("Distances without density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(distances_without_density_constraint), np.mean(distances_without_density_constraint), np.var(distances_without_density_constraint)))
print("Distances with density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(distances_with_density_constraint), np.mean(distances_with_density_constraint), np.var(distances_with_density_constraint)))
print("Perturbed")
print("Without density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(scores_perturbed_without_density_constraint), np.mean(scores_perturbed_without_density_constraint), np.var(scores_perturbed_without_density_constraint)))
print("With density constrain: Median: {0} Mean: {1} Var: {2}".format( | np.median(scores_perturbed_with_density_constraint) | numpy.median |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
"""Cross Layer Equalization"""
import collections
import typing
import numpy as np
import tensorflow as tf
import libpymo
from aimet_common.utils import AimetLogger
from aimet_tensorflow.keras.batch_norm_fold import fold_all_batch_norms
from aimet_tensorflow.keras.connectedgraph import ConnectedGraph
from aimet_tensorflow.keras.utils import model_transform_utils
from aimet_tensorflow.keras.utils.weight_tensor_utils import WeightTensorUtils
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.CrosslayerEqualization)
BatchNormFoldedPair = typing.Union[typing.Tuple[tf.keras.layers.Conv2D,
tf.keras.layers.BatchNormalization],
typing.Tuple[tf.keras.layers.Dense,
tf.keras.layers.BatchNormalization]]
ClsSet = typing.Union[typing.Tuple[tf.keras.layers.Conv2D,
tf.keras.layers.Conv2D],
typing.Tuple[tf.keras.layers.Conv2D,
tf.keras.layers.DepthwiseConv2D,
tf.keras.layers.Conv2D]]
ScaleFactor = typing.Union[np.ndarray, typing.Tuple[np.ndarray, np.ndarray]]
ReluFlag = typing.Union[bool, typing.Tuple[bool, bool]]
cls_supported_layers = (tf.keras.layers.Conv2D, tf.keras.layers.Conv1D)
zero_padding_layers = (tf.keras.layers.ZeroPadding2D, tf.keras.layers.ZeroPadding1D)
cls_supported_activations = (tf.keras.layers.ReLU, tf.keras.layers.PReLU)
class ClsSetInfo:
"""
This class hold information about the layers in a CLS set, along with corresponding scaling factors
and other information like if there is a ReLU activation function between the CLS set layers
"""
class ClsSetLayerPairInfo:
"""
Models a pair of layers that were scaled using CLS. And related information.
"""
def __init__(self, layer1: tf.keras.layers.Conv2D, layer2: tf.keras.layers.Conv2D, scale_factor: np.ndarray,
relu_activation_between_layers: bool):
"""
:param layer1: Layer whose bias is folded
:param layer2: Layer to which bias of previous layer's bias is folded
:param scale_factor: Scale Factor found from Cross Layer Scaling to scale BN parameters
:param relu_activation_between_layers: If the activation between layer1 and layer2 is Relu
"""
self.layer1 = layer1
self.layer2 = layer2
self.scale_factor = scale_factor
self.relu_activation_between_layers = relu_activation_between_layers
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.layer1 == other.layer1 and \
self.layer2 == other.layer2 and \
np.allclose(self.scale_factor, other.scale_factor) and \
self.relu_activation_between_layers == other.relu_activation_between_layers
return False
def __init__(self, cls_pair_1: ClsSetLayerPairInfo, cls_pair_2: ClsSetLayerPairInfo = None):
"""
Constructor takes 2 pairs if Depth-wise separable layer is being folded
:param cls_pair_1: Pair between two conv or conv and depth-wise conv
:param cls_pair_2: Pair between depth-wise conv and point-wise conv
"""
if cls_pair_2:
self.cls_pair_info_list = [cls_pair_1, cls_pair_2]
else:
self.cls_pair_info_list = [cls_pair_1]
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.cls_pair_info_list == other.cls_pair_info_list
return False
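    # Hedged illustration (layer objects and scale factors assumed): folding a
    # depthwise-separable block records two scaled pairs in a single ClsSetInfo:
    #   info = ClsSetInfo(
    #       ClsSetInfo.ClsSetLayerPairInfo(conv, dw_conv, s_1, True),
    #       ClsSetInfo.ClsSetLayerPairInfo(dw_conv, pw_conv, s_2, False))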
class GraphSearchUtils:
"""Implements graph search utils required by CLE feature"""
def __init__(self,
model: tf.keras.Model,
input_shapes: typing.Union[None, typing.Tuple,
typing.List[typing.Tuple]]):
"""
:param model: Keras Model (Sequential, Functional, Subclassing)
:param input_shapes: Input shape tuple or list of input tuple shape
"""
self._connected_graph = ConnectedGraph(model, input_shapes)
self._ordered_module_list = self._get_ordered_list_of_conv_modules()
def _get_ordered_list_of_conv_modules(self):
"""
Finds order of nodes in graph
:return: List of name, layer tuples in graph in order
"""
result = []
for op in self._connected_graph.ordered_ops:
layer = op.get_module()
if isinstance(layer, cls_supported_layers):
result.append([layer.name, layer])
return result
def find_layer_groups_to_scale(self) -> typing.List[typing.List[tf.keras.layers.Conv2D]]:
"""
Find layer groups to scale
:return: List of groups of layers. Each group can be independently equalized
"""
# Find the input node(s) in the graph
input_nodes = []
for op in self._connected_graph.get_all_ops().values():
if op.inputs and op.inputs[0].is_model_input:
input_nodes.append(op)
layer_groups = []
for op in input_nodes:
self.find_downstream_layer_groups_to_scale(op, layer_groups)
# Sort the layer groups in order of occurrence in the model
ordered_layer_groups = []
for _, module in self._ordered_module_list:
for layer_group in layer_groups:
if layer_group[0] is module:
ordered_layer_groups.append(layer_group)
return ordered_layer_groups
@staticmethod
def find_downstream_layer_groups_to_scale(op, layer_groups, current_group=None, visited_nodes=None):
"""
Recursive function to find cls layer groups downstream from a given op
:param op: Starting op to search from
:param layer_groups: Running list of layer groups
:param current_group: Running current layer group
:param visited_nodes: Running list of visited nodes (to short-circuit recursion)
:return: None
"""
if not visited_nodes:
visited_nodes = []
if not current_group:
current_group = []
if op in visited_nodes:
return
visited_nodes.append(op)
current_layer = op.get_module()
# Conv2D, Conv1D or its subclass is added to the current group
if current_layer and isinstance(current_layer, cls_supported_layers):
current_group.append(current_layer)
# Terminating condition for current group
if not current_layer or not GraphSearchUtils._is_supported_layer_case(current_layer):
if (len(current_group) > 1) and (current_group not in layer_groups):
layer_groups.append(current_group)
current_group = []
if op.output:
for consumer in op.output.consumers:
GraphSearchUtils.find_downstream_layer_groups_to_scale(consumer, layer_groups,
current_group, visited_nodes)
# Reached a leaf.. See if the current group has something to grab
if (len(current_group) > 1) and (current_group not in layer_groups):
layer_groups.append(current_group)
@staticmethod
def _is_supported_layer_case(layer: tf.keras.layers.Layer) -> bool:
"""
Check if the current layer is CLS supported layers or a supported activation layer
:param layer: tf.keras.layers.Layer
:return: True if it's CLS supported layers or a supported layer
"""
return isinstance(layer, (cls_supported_layers + zero_padding_layers)) or \
GraphSearchUtils._is_supported_activations(layer) or \
GraphSearchUtils.is_folded_batch_normalization(layer)
@staticmethod
def is_folded_batch_normalization(layer: tf.keras.layers.Layer) -> bool:
"""
Method to check if layer is folded batchnorm or not
:param layer: layer to check if it is folded batch norm
:return: True if it is folded batch norm, False if not
"""
if not isinstance(layer, tf.keras.layers.BatchNormalization):
return False
return np.all(layer.beta == 0.0) and np.all(layer.gamma == 1.0)
@staticmethod
def _is_supported_activations(layer: tf.keras.layers.Layer) -> bool:
"""
Check if the current layer is a supported activation layer
:param layer: tf.keras.layers.Layer
:return: True if layer is ReLU, PReLU or Activation with supported type
"""
# Case of explicit layer such as tf.keras.layers.ReLU
if isinstance(layer, cls_supported_activations):
return True
# Case of implicit layer such as tf.keras.layers.Activation(tf.nn.relu)
# Note: PReLU is not supported by implicit approach until TF 2.4
layer_config = layer.get_config()
activation = layer_config.get("activation")
if activation is None:
return False
return activation in ["relu", "relu6"]
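    # Hedged examples: tf.keras.layers.ReLU() passes the isinstance check and
    # tf.keras.layers.Activation("relu") passes the config check, while
    # tf.keras.layers.Activation("tanh") is rejected.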
@staticmethod
def convert_layer_group_to_cls_sets(layer_group: typing.List[tf.keras.layers.Conv2D]) \
-> typing.List[ClsSet]:
"""
Helper function to convert a layer group to a list of cls sets
:param layer_group: Given layer group to convert
:return: List of cls sets
"""
cls_sets = []
layer_group = collections.deque(layer_group)
prev_layer_to_scale = layer_group.popleft()
while layer_group:
next_layer_to_scale = layer_group.popleft()
if isinstance(next_layer_to_scale, tf.keras.layers.DepthwiseConv2D):
next_non_depthwise_conv_layer = layer_group.popleft()
# DepthwiseConv layer right after DepthwiseConv layer is not currently supported
if isinstance(next_non_depthwise_conv_layer, tf.keras.layers.DepthwiseConv2D):
_logger.error("Consecutive DepthwiseConv layer not currently supported")
raise NotImplementedError
cls_sets.append(
(prev_layer_to_scale, next_layer_to_scale, next_non_depthwise_conv_layer))
prev_layer_to_scale = next_non_depthwise_conv_layer
else:
cls_sets.append((prev_layer_to_scale, next_layer_to_scale))
prev_layer_to_scale = next_layer_to_scale
return cls_sets
@staticmethod
def is_relu_activation_present_in_cls_sets(cls_sets: typing.List[ClsSet]) \
-> typing.List[typing.Union[bool, typing.Tuple[bool, bool]]]:
"""
Check if there is ReLU or PReLU activation between cls sets
:param cls_sets: List of ClsSet to find ReLU activation in
:return: List of ReLU activation presence flags (bool or tuple of bool) corresponding to the input cls_sets param
"""
is_relu_activation_in_cls_sets = []
for cls_set in cls_sets:
cls_set = cls_set[:-1]
is_relu_activation_in_cls_set = []
for layer in cls_set:
has_relu_activation = GraphSearchUtils._does_layer_have_relu_activation(layer)
is_relu_activation_in_cls_set.append(has_relu_activation)
if len(is_relu_activation_in_cls_set) == 1:
is_relu_activation_in_cls_sets.append(is_relu_activation_in_cls_set[0])
else:
is_relu_activation_in_cls_sets.append(tuple(is_relu_activation_in_cls_set))
return is_relu_activation_in_cls_sets
@staticmethod
def _does_layer_have_relu_activation(layer: tf.keras.layers.Conv2D) -> bool:
"""
Check if layer has ReLU or PReLU activation function
:param layer: Conv2D or its subclass whose activation function should be checked
:return: True If layer has ReLU or PReLU activation, otherwise False
"""
activation_info = tf.keras.activations.serialize(layer.activation)
if isinstance(activation_info, str):
# Instantiating like tf.keras.layers.Conv2D(8, kernel_size=3, activation=tf.keras.activations.relu)
# has the result of serialization as str type
activation_type = activation_info
elif isinstance(activation_info, dict):
# Instantiating like tf.keras.layers.Conv2D(8, kernel_size=3, activation=tf.keras.layers.ReLU())
# has the result of serialization as dict type
activation_type = activation_info["class_name"].lower()
else:
raise NotImplementedError("Not supported format")
# If activation parameter is not set or None, default activation_type is linear
if activation_type == "linear" and layer.outbound_nodes:
assert len(layer.outbound_nodes) == 1
outbound_layer = layer.outbound_nodes[0].outbound_layer
return isinstance(outbound_layer, (tf.keras.layers.ReLU, tf.keras.layers.PReLU))
return activation_type in ["relu", "prelu"]
class CrossLayerScaling:
"""
Code to apply the cross-layer-scaling technique to a model
"""
@staticmethod
def scale_cls_set_with_conv_layers(
cls_set: typing.Tuple[tf.keras.layers.Conv2D, tf.keras.layers.Conv2D]) -> np.ndarray:
"""
API to invoke equalize layer params (update for weights and bias is in place)
:param cls_set: Consecutive Conv layers Tuple whose weights and biases need to be equalized
:return: Scaling factor S_12 for each conv layer pair: numpy array
"""
for layer in cls_set:
# NOTE: DepthwiseConv2D and Conv2DTranspose are subclasses of Conv2D
# The check below therefore covers the Conv2D, DepthwiseConv2D and Conv2DTranspose classes
if not isinstance(layer, tf.keras.layers.Conv2D):
raise ValueError("Only Conv or Transposed Conv layers are supported for CLE")
scaling_factor, prev_layer_params, curr_layer_params = CrossLayerScaling.call_mo_scale(cls_set)
prev_layer, curr_layer = cls_set
weight_and_bias_0 = CrossLayerScaling._unpack_equalization_params(prev_layer, prev_layer_params,
unpack_bias=True)
prev_layer.set_weights(weight_and_bias_0)
weight_and_bias_1 = CrossLayerScaling._unpack_equalization_params(curr_layer, curr_layer_params,
unpack_bias=False)
curr_layer.set_weights(weight_and_bias_1)
return scaling_factor
@staticmethod
def call_mo_scale(cls_set: typing.Tuple[tf.keras.layers.Conv2D, tf.keras.layers.Conv2D]) \
-> typing.Tuple[np.ndarray, libpymo.EqualizationParams, libpymo.EqualizationParams]:
"""
Invokes scale API in model optimization library
:param cls_set: Consecutive Conv layers Tuple whose weights and biases need to be equalized
:return: Scaling factor, prev and current layer updated parameters
"""
prev_layer_params = CrossLayerScaling._pack_equalization_params(cls_set[0], pack_bias=True)
curr_layer_params = CrossLayerScaling._pack_equalization_params(cls_set[1], pack_bias=False)
scaling_factor = libpymo.scaleLayerParams(prev_layer_params, curr_layer_params)
return scaling_factor, prev_layer_params, curr_layer_params
@staticmethod
def scale_cls_set_with_depthwise_conv_layers(
cls_set: typing.Tuple[tf.keras.layers.Conv2D,
tf.keras.layers.DepthwiseConv2D,
tf.keras.layers.Conv2D]) -> typing.Tuple[np.ndarray, np.ndarray]:
"""
API to invoke equalize layer params (update for weights and bias is in place)
:param cls_set: Consecutive Conv layers whose weights and biases need to be equalized.
Second Conv layer is a depth-wise conv and third conv layer is point-wise conv
:return: Scaling factors S_12 and S_23 : numpy arrays
"""
for layer in cls_set:
# NOTE: DepthwiseConv2D and Conv2DTranspose are subclasses of Conv2D
# The check below therefore covers the Conv2D, DepthwiseConv2D and Conv2DTranspose classes
if not isinstance(layer, tf.keras.layers.Conv2D):
raise ValueError("Only Conv or Transposed Conv layers are supported for CLE")
scaling_params, prev_layer_params, curr_layer_params, next_layer_params = \
CrossLayerScaling.call_mo_scale_depthwise_separable_layer(cls_set)
prev_layer, curr_layer, next_layer = cls_set
weight_and_bias_0 = CrossLayerScaling._unpack_equalization_params(prev_layer,
prev_layer_params,
unpack_bias=True)
prev_layer.set_weights(weight_and_bias_0)
weight_and_bias_1 = CrossLayerScaling._unpack_equalization_params(curr_layer,
curr_layer_params,
unpack_bias=True)
curr_layer.set_weights(weight_and_bias_1)
weight_and_bias_2 = CrossLayerScaling._unpack_equalization_params(next_layer,
next_layer_params,
unpack_bias=False)
next_layer.set_weights(weight_and_bias_2)
return scaling_params.scalingMatrix12, scaling_params.scalingMatrix23
@staticmethod
def call_mo_scale_depthwise_separable_layer(
cls_set: typing.Tuple[tf.keras.layers.Conv2D,
tf.keras.layers.DepthwiseConv2D,
tf.keras.layers.Conv2D]) -> typing.Tuple[libpymo.RescalingParamsVectors,
libpymo.EqualizationParams,
libpymo.EqualizationParams,
libpymo.EqualizationParams]:
"""
Invokes scale API in model optimization library
:param cls_set: Consecutive Conv layers whose weights and biases need to be equalized
:return: Scaling factors, prev, current and next layer updated parameters
"""
prev_layer_params = CrossLayerScaling._pack_equalization_params(cls_set[0], pack_bias=True)
curr_layer_params = CrossLayerScaling._pack_equalization_params(cls_set[1], pack_bias=True)
next_layer_params = CrossLayerScaling._pack_equalization_params(cls_set[2], pack_bias=False)
scaling_params = libpymo.scaleDepthWiseSeparableLayer(prev_layer_params, curr_layer_params, next_layer_params)
return scaling_params, prev_layer_params, curr_layer_params, next_layer_params
@staticmethod
def _pack_equalization_params(layer: tf.keras.layers.Conv2D, pack_bias: bool) -> libpymo.EqualizationParams:
equalization_params = libpymo.EqualizationParams()
param_tensors = layer.get_weights()
weight_tensor = param_tensors[0]
weight_tensor = WeightTensorUtils.transpose_from_tf_to_libpymo_format(weight_tensor, layer)
equalization_params.weight = weight_tensor.reshape(-1)
equalization_params.weightShape = np.array(weight_tensor.shape)
if pack_bias:
if layer.use_bias:
equalization_params.bias = param_tensors[1]
else:
equalization_params.isBiasNone = True
return equalization_params
@staticmethod
def _unpack_equalization_params(layer: tf.keras.layers.Conv2D,
equalization_params: libpymo.EqualizationParams,
unpack_bias: bool) -> typing.List:
weight_tensor = np.reshape(equalization_params.weight, equalization_params.weightShape)
weight_tensor = WeightTensorUtils.transpose_from_libpymo_to_tf_format(weight_tensor, layer)
if layer.use_bias:
if unpack_bias:
bias_tensor = np.reshape(equalization_params.bias, equalization_params.weightShape[0])
else:
_, bias_tensor = layer.get_weights()
param_tensors = [weight_tensor, bias_tensor]
else:
param_tensors = [weight_tensor]
return param_tensors
@staticmethod
def scale_cls_sets(cls_sets: typing.List[ClsSet]) -> \
typing.List[typing.Union[np.ndarray, typing.Tuple[np.ndarray, np.ndarray]]]:
"""
Scale each cls set
:param cls_sets: Cls sets to scale
:return: List of scale factors corresponding to each scaled cls set
"""
scale_factor_list = []
for cls_set in cls_sets:
if len(cls_set) == 3:
scale_factor = CrossLayerScaling.scale_cls_set_with_depthwise_conv_layers(cls_set)
else:
scale_factor = CrossLayerScaling.scale_cls_set_with_conv_layers(cls_set)
scale_factor_list.append(scale_factor)
return scale_factor_list
@staticmethod
def create_cls_set_info_list(cls_sets: typing.List[ClsSet],
scale_factors: typing.List[ScaleFactor],
is_relu_activation_in_cls_sets: typing.List[ReluFlag]) -> typing.List[ClsSetInfo]:
"""
Binds information from three separate lists into one ClsSetInfo data structure
:param cls_sets: List of CLS sets
:param scale_factors: List of scale factors for each cls set
:param is_relu_activation_in_cls_sets: List of ReLU flag whether there is ReLU activation in each cls set
:return: List of ClsSetInfo
"""
assert len(cls_sets) == len(scale_factors) == len(is_relu_activation_in_cls_sets)
cls_set_info_list = []
for cls_set, scale_factor, has_relu_activation in zip(cls_sets,
scale_factors,
is_relu_activation_in_cls_sets):
# Depthwise separable convolution layer case (triplet of layers)
# Should have two scale factors and ReLU flags
if isinstance(scale_factor, tuple):
assert len(cls_set) == 3
assert len(scale_factor) == len(has_relu_activation) == 2
prev_layer, curr_layer, next_layer = cls_set
cls_pair_1 = ClsSetInfo.ClsSetLayerPairInfo(prev_layer, curr_layer,
scale_factor[0], has_relu_activation[0])
cls_pair_2 = ClsSetInfo.ClsSetLayerPairInfo(curr_layer, next_layer,
scale_factor[1], has_relu_activation[1])
cls_set_info = ClsSetInfo(cls_pair_1, cls_pair_2)
# Standard convolution layer case (tuple of layers)
# Should have one scale factor and ReLU flag
else:
prev_layer, curr_layer = cls_set
cls_pair = ClsSetInfo.ClsSetLayerPairInfo(prev_layer, curr_layer,
scale_factor, has_relu_activation)
cls_set_info = ClsSetInfo(cls_pair)
cls_set_info_list.append(cls_set_info)
return cls_set_info_list
@staticmethod
def scale_model(model: tf.keras.Model,
input_shapes: typing.Union[None,
typing.Tuple,
typing.List[typing.Tuple]]) -> typing.List[ClsSetInfo]:
"""
Uses cross-layer scaling to scale all applicable layers in the given model
:param model: tf.keras.Model
:param input_shapes: Input shape tuple or list of input shape tuples
:return: CLS information for each CLS set
"""
# Find layer groups
graph_search_util = GraphSearchUtils(model, input_shapes)
layer_groups = graph_search_util.find_layer_groups_to_scale()
# Find cls sets from the layer groups
cls_sets = []
for layer_group in layer_groups:
cls_set = GraphSearchUtils.convert_layer_group_to_cls_sets(layer_group)
cls_sets += cls_set
# Scale the CLS sets
scale_factors = CrossLayerScaling.scale_cls_sets(cls_sets)
# Find if there were ReLU activations between layers of each cls set
is_relu_activation_in_cls_sets = graph_search_util.is_relu_activation_present_in_cls_sets(cls_sets)
# Convert to a list of cls set info elements
cls_set_info_list = CrossLayerScaling.create_cls_set_info_list(cls_sets,
scale_factors,
is_relu_activation_in_cls_sets)
return cls_set_info_list
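# Hedged usage sketch of the end-to-end CLS entry point above. The model and
# the input_shapes convention (shape without the batch dimension, mirroring
# the GraphSearchUtils constructor) are assumptions for illustration only.
def _demo_scale_model():
    import tensorflow as tf
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
        tf.keras.layers.Conv2D(16, 3, activation='relu'),
    ])
    cls_set_info_list = CrossLayerScaling.scale_model(model, (32, 32, 3))
    print(len(cls_set_info_list))  # one ClsSetInfo per scaled layer pair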
class HighBiasFold:
"""
Code to apply the high-bias-fold technique to a model
"""
@staticmethod
def bias_fold(cls_set_info_list: typing.List[ClsSetInfo],
bn_layers: typing.Dict[tf.keras.layers.Conv2D, tf.keras.layers.BatchNormalization]):
"""
Folds bias values greater than 3 * sigma to next layer's bias
:param cls_set_info_list: List of info elements for each cls set
:param bn_layers: Key: Conv/Linear layer Value: Corresponding folded BN layer
"""
if not bn_layers:
_logger.info('High Bias folding is not supported for models without BatchNorm Layers')
return
for cls_set_info in cls_set_info_list:
for cls_pair_info in cls_set_info.cls_pair_info_list:
if (not cls_pair_info.layer1.use_bias) or (not cls_pair_info.layer2.use_bias) or \
(cls_pair_info.layer1 not in bn_layers):
continue
prev_layer_params, curr_layer_params = HighBiasFold.call_mo_high_bias_fold(cls_pair_info, bn_layers)
layer1 = cls_pair_info.layer1
layer1_weight_tensor, _ = layer1.get_weights()
layer1_bias_tensor = np.array(prev_layer_params.bias)
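# Hedged illustrative sketch of the scaling step delegated to libpymo above:
# conceptually, cross-layer scaling computes a per-channel factor
# s_i = sqrt(r1_i / r2_i), where r1_i is the absolute weight range of output
# channel i of the first layer and r2_i that of input channel i of the second;
# W1 is divided by s and W2 multiplied by s, which preserves the composition
# under (piecewise-)linear activations. The helper name and tensor layout are
# assumptions for intuition only, not the actual libpymo implementation.
def _cls_scale_factor_sketch(w1_out_first, w2_in_first):
    import numpy as np
    r1 = np.max(np.abs(w1_out_first.reshape(w1_out_first.shape[0], -1)), axis=1)
    r2 = np.max(np.abs(w2_in_first.reshape(w2_in_first.shape[0], -1)), axis=1)
    return np.sqrt(r1 / np.maximum(r2, 1e-12))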
import time
import numpy as np
import pandas as pd
from scipy import interpolate
import copy
from pandas.api.types import is_string_dtype
def get_GAM_df_by_models(models, x_values_lookup=None, aggregate=True):
models = iter(models)
first_model = next(models)
first_df = first_model.get_GAM_df(x_values_lookup)
is_x_values_lookup_none = x_values_lookup is None
if is_x_values_lookup_none:
x_values_lookup = first_df[['feat_name', 'x']].set_index('feat_name').x.to_dict()
all_dfs = [first_df]
for model in models:
the_df = model.get_GAM_df(x_values_lookup)
all_dfs.append(the_df)
if not aggregate:
return all_dfs
if len(all_dfs) == 1:
return first_df
all_ys = [np.concatenate(df.y) for df in all_dfs]
split_pts = first_df.y.apply(lambda x: len(x)).cumsum()[:-1]
first_df['y'] = np.split(np.mean(all_ys, axis=0), split_pts)
first_df['y_std'] = np.split(np.std(all_ys, axis=0), split_pts)
return first_df
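# Hedged illustration of the aggregation trick used above: per-feature curves
# are concatenated, averaged elementwise across models, then split back at the
# original feature boundaries. Toy data below is purely illustrative.
def _demo_curve_aggregation():
    curves_model_a = [np.array([1.0, 2.0]), np.array([3.0])]
    curves_model_b = [np.array([3.0, 4.0]), np.array([5.0])]
    all_ys = [np.concatenate(c) for c in (curves_model_a, curves_model_b)]
    split_pts = np.cumsum([len(c) for c in curves_model_a])[:-1]
    print(np.split(np.mean(all_ys, axis=0), split_pts))
    # -> [array([2., 3.]), array([4.])]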
def predict_score(model, X):
result = predict_score_with_each_feature(model, X)
return result.values.sum(axis=1)
def predict_score_by_df(GAM_plot_df, X):
result = predict_score_with_each_feature_by_df(GAM_plot_df, X, sum_directly=True)
return result
def predict_score_with_each_feature(model, X):
x_values_lookup = get_x_values_lookup(X, model.feature_names)
GAM_plot_df = model.get_GAM_df(x_values_lookup)
return predict_score_with_each_feature_by_df(GAM_plot_df, X)
def predict_score_with_each_feature_by_df(GAM_plot_df, X, sum_directly=False):
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=GAM_plot_df.feat_name.iloc[1:(X.shape[1]+1)].values.tolist())
from tqdm import tqdm
if sum_directly:
scores = np.zeros((X.shape[0]))
else:
scores = np.empty((X.shape[0], GAM_plot_df.shape[0]))
for f_idx, attrs in tqdm(GAM_plot_df.iterrows()):
if attrs.feat_idx == -1:
offset = attrs.y[0]
if sum_directly:
scores += offset
else:
scores[:, f_idx] = offset
continue
feat_idx = attrs.feat_idx if not isinstance(attrs.feat_idx, tuple) else list(attrs.feat_idx)
truncated_X = X.iloc[:, feat_idx]
if isinstance(attrs.feat_idx, tuple):
score_lookup = pd.Series(attrs.y, index=attrs.x)
truncated_X = pd.MultiIndex.from_frame(truncated_X) # list(truncated_X.itertuples(index=False, name=None))
else:
score_lookup = pd.Series(attrs.y, index=attrs.x)
truncated_X = truncated_X.values
if sum_directly:
scores += score_lookup[truncated_X].values
else:
scores[:, (f_idx)] = score_lookup[truncated_X].values
if sum_directly:
return scores
else:
return pd.DataFrame(scores, columns=GAM_plot_df.feat_name.values.tolist())
def sigmoid(x):
"Numerically stable sigmoid function."
return np.where(x >= 0,
1 / (1 + np.exp(-x)),
np.exp(x) / (1 + np.exp(x)))
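# Quick illustrative check of the piecewise-stable sigmoid above. Note that
# np.where evaluates both branches eagerly, so NumPy may still emit an
# overflow RuntimeWarning for very large |x|, but the selected results are
# finite and correct (the unstable branch is discarded).
def _demo_sigmoid():
    print(sigmoid(np.array([-1000.0, 0.0, 1000.0])))  # -> [0.  0.5 1. ]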
def get_X_values_counts(X, feature_names=None):
if feature_names is None:
feature_names = ['f%d' % i for i in range(X.shape[1])] \
if isinstance(X, np.ndarray) else X.columns
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=feature_names)
# return {'f%d' % idx: dict(zip(*np.unique(X[:, idx], return_counts=True))) for idx in range(X.shape[1])}
return X.apply(lambda x: x.value_counts().sort_index().to_dict(), axis=0)
def bin_data(X, max_n_bins=256):
'''
Do a quantile binning for the X
'''
X = X.copy()
for col_name, dtype in zip(X.dtypes.index, X.dtypes):
if is_string_dtype(dtype): # categorical
continue
col_data = X[col_name].astype(np.float32)
uniq_vals = np.unique(col_data[~np.isnan(col_data)])
if len(uniq_vals) > max_n_bins:
print(f'bin features {col_name} with uniq val {len(uniq_vals)} to only {max_n_bins}')
bins = np.unique(
np.quantile(
col_data, q=np.linspace(0, 1, max_n_bins + 1),
)
)
_, bin_edges = np.histogram(col_data, bins=bins)
digitized = np.digitize(col_data, bin_edges, right=False)
digitized[digitized == 0] = 1
digitized -= 1
# NOTE: NA handling done later.
# digitized[np.isnan(col_data)] = self.missing_constant
X.loc[:, col_name] = pd.Series(bins)[digitized].values.astype(np.float32)
return X
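# Minimal illustration of the quantile binning above (hedged sketch): a
# numeric column with many distinct values is snapped onto the quantile
# edges (roughly max_n_bins of them), while string/categorical columns
# pass through unchanged.
def _demo_bin_data():
    rng = np.random.RandomState(0)
    df = pd.DataFrame({'num': rng.randn(1000), 'cat': ['a', 'b'] * 500})
    binned = bin_data(df, max_n_bins=8)
    print(df['num'].nunique(), '->', binned['num'].nunique())  # e.g. 1000 -> 9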
def get_x_values_lookup(X, feature_names=None):
if isinstance(X, np.ndarray):
if feature_names is None:
feature_names = ['f%d' % idx for idx in range(X.shape[1])]
X = pd.DataFrame(X, columns=feature_names)
else:
feature_names = X.columns
return {
feat_name : np.unique(X.iloc[:, feat_idx]).astype(X.dtypes[feat_idx])
for feat_idx, feat_name in enumerate(feature_names)
}
def my_interpolate(x, y, new_x):
''' Handle edge cases for interpolation '''
assert len(x) == len(y)
if len(x) == 1:
y = np.full(len(new_x), y[0])
else:
f = interpolate.interp1d(x, y, fill_value='extrapolate', kind='nearest')
y = f(new_x.astype(float))
return y
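# Illustrative edge cases for my_interpolate above: a single (x, y) sample
# broadcasts to a constant, and out-of-range queries use nearest-neighbour
# extrapolation instead of raising.
def _demo_my_interpolate():
    print(my_interpolate(np.array([1.0]), np.array([5.0]), np.array([0.0, 2.0])))
    # -> [5. 5.]
    print(my_interpolate(np.array([0.0, 1.0]), np.array([0.0, 10.0]),
                         np.array([-1.0, 0.4, 2.0])))
    # -> [ 0.  0. 10.]  (kind='nearest': 0.4 snaps to x=0.0, out-of-range to the edge)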
class Timer:
def __init__(self, name, remove_start_msg=True):
self.name = name
self.remove_start_msg = remove_start_msg
def __enter__(self):
self.start_time = time.time()
print('Run "%s".........' % self.name, end='\r' if self.remove_start_msg else '\n')
def __exit__(self, exc_type, exc_val, exc_tb):
time_diff = float(time.time() - self.start_time)
time_str = '{:.1f}s'.format(time_diff) if time_diff >= 1 else '{:.0f}ms'.format(time_diff * 1000)
print('Finish "{}" in {}'.format(self.name, time_str))
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for k, v in self.items():
if isinstance(v, dict) and not isinstance(v, DotDict):
self[k] = DotDict(v)
def __deepcopy__(self, memo):
return DotDict(copy.deepcopy(dict(self), memo=memo))
def extract_GAM(X, predict_fn, predict_type='binary_logodds', max_n_bins=None):
'''
X: input 2d array
predict_fn: the model prediction function
predict_type: choose from ["binary_logodds", "binary_prob", "regression"]
This corresponds to which predict_fn to pass in.
max_n_bins: default set as None (No binning). It bins the value into
this number of buckets to reduce clutter in the resulting GAM graph.
Should be set large enough not to change predictions too much.
'''
assert isinstance(X, pd.DataFrame)
if max_n_bins is not None:
X = bin_data(X, max_n_bins=max_n_bins)
X_values_counts = get_X_values_counts(X)
keys = list(X_values_counts.keys())
# Use the X_values_counts to produce the Xs
log_odds = {'offset': {'y_val': 0.}}
for feat_name in keys:
all_xs = list(X_values_counts[feat_name].keys())
log_odds[feat_name] = {
'x_val': np.array(all_xs),
'y_val': np.zeros(len(all_xs), dtype=np.float32),
}
# Extract the GAM value from the model
split_lens = [len(log_odds[f_name]['x_val']) for f_name in keys]
cum_lens = np.cumsum(split_lens)
first_record = X.iloc[0].values
all_X = first_record.reshape((1, -1)).repeat(1 + np.sum(split_lens), axis=0)
for f_idx, (feature_name, s_idx, e_idx) in enumerate(
zip(keys, [0] + cum_lens[:-1].tolist(), cum_lens)):
x = log_odds[feature_name]['x_val']
all_X[(1 + s_idx):(1 + e_idx), f_idx] = x
if predict_type in ['binary_logodds', 'regression']:
score = predict_fn(all_X)
elif predict_type == 'binary_prob':
eps = 1e-8
prob = predict_fn(all_X)
prob = np.clip(prob, eps, 1. - eps)
score = np.log(prob) - np.log(1. - prob)
import cv2
import numpy as np
from PIL import Image
import scipy
import scipy.ndimage.filters as filters
import scipy.ndimage.morphology as morphology
import numpy as np
from rtree import index
import sys
import pickle
from common import *
vector_norm = 25.0
def vNorm(v1):
l = distance(v1,(0,0))+0.0000001
return (v1[0]/l, v1[1]/l)
def anglediff(v1, v2):
v1 = vNorm(v1)
v2 = vNorm(v2)
return v1[0]*v2[0] + v1[1] * v2[1]
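# Hedged illustration: anglediff returns the cosine of the angle between v1
# and v2 (dot product of the normalized vectors), so 1.0 means parallel, 0.0
# perpendicular, -1.0 opposite. It relies on distance() from common, assumed
# here to be the Euclidean distance.
def _demo_anglediff():
    print(anglediff((1, 0), (1, 0)))   # ~1.0
    print(anglediff((1, 0), (0, 1)))   # ~0.0
    print(anglediff((1, 0), (-1, 0)))  # ~-1.0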
def graph_refine(graph, isolated_thr = 150, spurs_thr = 30, three_edge_loop_thr = 70):
neighbors = graph
gid = 0
grouping = {}
for k, v in neighbors.items():  # dict.iteritems() is Python 2 only
if k not in grouping:
# start a search
queue = [k]
while len(queue) > 0:
n = queue.pop(0)
if n not in grouping:
grouping[n] = gid
for nei in neighbors[n]:
queue.append(nei)
gid += 1
group_count = {}
for k, v in grouping.items():
if v not in group_count:
group_count[v] = (1,0)
else:
group_count[v] = (group_count[v][0] + 1, group_count[v][1])
for nei in neighbors[k]:
a = k[0] - nei[0]
b = k[1] - nei[1]
d = np.sqrt(a*a + b*b)
group_count[v] = (group_count[v][0], group_count[v][1] + d/2)
# short spurs
remove_list = []
for k, v in neighbors.items():
if len(v) == 1:
if len(neighbors[v[0]]) >= 3:
a = k[0] - v[0][0]
b = k[1] - v[0][1]
d = np.sqrt(a*a + b*b)
if d < spurs_thr:
remove_list.append(k)
remove_list2 = []
remove_counter = 0
new_neighbors = {}
def isRemoved(k):
gid = grouping[k]
if group_count[gid][0] <= 1:
return True
elif group_count[gid][1] <= isolated_thr:
return True
elif k in remove_list:
return True
elif k in remove_list2:
return True
else:
return False
for k, v in neighbors.items():
if isRemoved(k):
remove_counter += 1
pass
else:
new_nei = []
for nei in v:
if isRemoved(nei):
pass
else:
new_nei.append(nei)
new_neighbors[k] = list(new_nei)
#print(len(new_neighbors), "remove", remove_counter, "nodes")
return new_neighbors
def graph_shave(graph, spurs_thr = 50):
neighbors = graph
# short spurs
remove_list = []
for k, v in neighbors.items():
if len(v) == 1:
d = distance(k,v[0])
cur = v[0]
l = [k]
while True:
if len(neighbors[cur]) >= 3:
break
elif len(neighbors[cur]) == 1:
l.append(cur)
break
else:
if neighbors[cur][0] == l[-1]:
next_node = neighbors[cur][1]
else:
next_node = neighbors[cur][0]
d += distance(cur, next_node)
l.append(cur)
cur = next_node
if d < spurs_thr:
for n in l:
if n not in remove_list:
remove_list.append(n)
def isRemoved(k):
if k in remove_list:
return True
else:
return False
new_neighbors = {}
remove_counter = 0
for k, v in neighbors.items():
if isRemoved(k):
remove_counter += 1
pass
else:
new_nei = []
for nei in v:
if isRemoved(nei):
pass
else:
new_nei.append(nei)
new_neighbors[k] = list(new_nei)
#print("shave", len(new_neighbors), "remove", remove_counter, "nodes")
return new_neighbors
def graph_refine_deloop(neighbors, max_step = 10, max_length = 200, max_diff = 5):
removed = []
impact = []
remove_edge = []
new_edge = []
for k, v in neighbors.items():
if k in removed:
continue
if k in impact:
continue
if len(v) < 2:
continue
for nei1 in v:
if nei1 in impact:
continue
if k in impact:
continue
for nei2 in v:
if nei2 in impact:
continue
if nei1 == nei2 :
continue
if neighbors_cos(neighbors, k, nei1, nei2) > 0.984:
l1 = neighbors_dist(neighbors, k, nei1)
l2 = neighbors_dist(neighbors, k, nei2)
#print("candidate!", l1,l2,neighbors_cos(neighbors, k, nei1, nei2))
if l2 < l1:
nei1, nei2 = nei2, nei1
remove_edge.append((k,nei2))
remove_edge.append((nei2,k))
new_edge.append((nei1, nei2))
impact.append(k)
impact.append(nei1)
impact.append(nei2)
break
new_neighbors = {}
def isRemoved(k):
if k in removed:
return True
else:
return False
for k, v in neighbors.items():
if isRemoved(k):
pass
else:
new_nei = []
for nei in v:
if isRemoved(nei):
pass
elif (nei, k) in remove_edge:
pass
else:
new_nei.append(nei)
new_neighbors[k] = list(new_nei)
for new_e in new_edge:
nk1 = new_e[0]
nk2 = new_e[1]
if nk2 not in new_neighbors[nk1]:
new_neighbors[nk1].append(nk2)
if nk1 not in new_neighbors[nk2]:
new_neighbors[nk2].append(nk1)
#print("remove %d edges" % len(remove_edge))
return new_neighbors, len(remove_edge)
def locate_stacking_road(graph):
idx = index.Index()
edges = []
for n1, v in graph.items():
for n2 in v:
if (n1,n2) in edges or (n2,n1) in edges:
continue
x1 = min(n1[0], n2[0])
x2 = max(n1[0], n2[0])
y1 = min(n1[1], n2[1])
y2 = max(n1[1], n2[1])
idx.insert(len(edges), (x1,y1,x2,y2))
edges.append((n1,n2))
adjustment = {}
crossing_point = {}
for edge in edges:
n1 = edge[0]
n2 = edge[1]
x1 = min(n1[0], n2[0])
x2 = max(n1[0], n2[0])
y1 = min(n1[1], n2[1])
y2 = max(n1[1], n2[1])
candidates = list(idx.intersection((x1,y1,x2,y2)))
for _candidate in candidates:
# todo mark the overlap point
candidate = edges[_candidate]
if n1 == candidate[0] or n1 == candidate[1] or n2 == candidate[0] or n2 == candidate[1]:
continue
if intersect(n1,n2,candidate[0], candidate[1]):
ip = intersectPoint(n1,n2,candidate[0], candidate[1])
if (candidate, edge) not in crossing_point:
crossing_point[(edge, candidate)] = ip
#release points
d = distance(ip, n1)
thr = 5.0
if d < thr:
vec = neighbors_norm(graph, n1, n2)
#vec = (vec[0] * (thr-d), vec[1] * (thr-d))
if n1 not in adjustment:
adjustment[n1] = [vec]
else:
adjustment[n1].append(vec)
d = distance(ip, n2)
if d < thr:
vec = neighbors_norm(graph, n2, n1)
#vec = (vec[0] * (thr-d), vec[1] * (thr-d))
if n2 not in adjustment:
adjustment[n2] = [vec]
else:
adjustment[n2].append(vec)
c1 = candidate[0]
c2 = candidate[1]
d = distance(ip, c1)
if d < thr:
vec = neighbors_norm(graph, c1, c2)
#vec = (vec[0] * (thr-d), vec[1] * (thr-d))
if c1 not in adjustment:
adjustment[c1] = [vec]
else:
adjustment[c1].append(vec)
d = distance(ip, c2)
if d < thr:
vec = neighbors_norm(graph, c2, c1)
#vec = (vec[0] * (thr-d), vec[1] * (thr-d))
if c2 not in adjustment:
adjustment[c2] = [vec]
else:
adjustment[c2].append(vec)
return crossing_point, adjustment
def _vis(_node_neighbors, save_file, size=2048, bk=None, draw_intersection = False):
node_neighbors = _node_neighbors
img = np.ones((size, size, 3), dtype=np.uint8) * 255
color_node = (255,0,0)
if bk is not None:
img = np.array(Image.open(bk))  # scipy.ndimage.imread was removed in SciPy >= 1.2
img = img.astype(float)  # the np.float alias was removed in NumPy >= 1.24
img = (img - 127)*0.75 + 127
img = img.astype(np.uint8)
color_edge = (0,255,255) # yellow
else:
color_edge = (0,0,0) # black
edge_width = 2
for k, v in node_neighbors.items():
n1 = k
for n2 in v:
cv2.line(img, (n1[1], n1[0]), (n2[1], n2[0]), color_edge,edge_width)
scale = 1
for k, v in node_neighbors.items():
n1 = k
cv2.circle(img, (int(n1[1]) * scale,int(n1[0]) * scale), 2, (255,0,0),-1)
cp, _ = locate_stacking_road(node_neighbors)
for k, v in cp.items():
e1 = k[0]
e2 = k[1]
if draw_intersection == True:
cv2.line(img, (int(e1[0][1]),int(e1[0][0])), (int(e1[1][1]),int(e1[1][0])), (0,255,0),edge_width)
cv2.line(img, (int(e2[0][1]),int(e2[0][0])), (int(e2[1][1]),int(e2[1][0])), (0,0,255),edge_width)
Image.fromarray(img).save(save_file)
def detect_local_minima(arr, mask, threshold = 0.5):
# https://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710
"""
Takes an array and detects the troughs using a local minimum filter.
Returns the coordinates of the troughs (i.e. the points whose value
equals the neighborhood minimum and whose mask value exceeds the threshold)
"""
# define a connected neighborhood
# http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure
neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
# apply the local minimum filter; all locations of minimum value
# in their neighborhood are set to 1
# http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter
local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)
# local_min is a mask that contains the peaks we are
# looking for, but also the background.
# In order to isolate the peaks we must remove the background from the mask.
#
# we create the mask of the background
background = (arr==0)
#
# a little technicality: we must erode the background in order to
# successfully subtract it from local_min, otherwise a line will
# appear along the background border (artifact of the local minimum filter)
# http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion
eroded_background = morphology.binary_erosion(
background, structure=neighborhood, border_value=1)
#
# we obtain the final mask, containing only peaks,
# by removing the background from the local_min mask
detected_minima = local_min ^ eroded_background
return np.where((detected_minima & (mask > threshold)))
#return np.where(detected_minima)
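# Minimal illustration of detect_local_minima above (hedged sketch): a single
# trough in an otherwise constant field is returned as coordinate arrays,
# provided the mask admits it.
def _demo_detect_local_minima():
    arr = np.array([[5, 5, 5],
                    [5, 1, 5],
                    [5, 5, 5]], dtype=float)
    mask = np.ones_like(arr)
    print(detect_local_minima(arr, mask))  # -> (array([1]), array([1]))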
def DrawKP(imagegraph, filename, imagesize=256, max_degree=6):
vertexness = imagegraph[:,:,0].reshape((imagesize, imagesize))
for i in range(max_degree):
vertexness = np.maximum(vertexness, imagegraph[:,:,2+4*i].reshape((imagesize, imagesize)))
kp = np.copy(vertexness)
smooth_kp = scipy.ndimage.filters.gaussian_filter(np.copy(kp), 1)
smooth_kp = smooth_kp / max(np.amax(smooth_kp),0.001)
Image.fromarray((smooth_kp*255.0).astype(np.uint8)).save(filename)
# Main function
def DecodeAndVis(imagegraph, filename, imagesize=256, max_degree=6, thr=0.5, edge_thr = 0.5, snap=False, kp_limit = 500, drop=True, use_graph_refine=True, testing=False, spacenet = False, angledistance_weight = 100, snap_dist = 15):
# At the very beginning of the training, the vertexness output can be very noisy.
# The decoder algorithm may find too many keypoints (vertices) and run very slowly.
# To avoid this slowdown, we limit the total number of the keypoints during training.
kp_limit = 10000000
if imagesize < 600:
kp_limit = 500
if testing :
kp_limit = 10000000
# Create numpy arrays for visualization.
if snap :
rgb = np.zeros((imagesize*4, imagesize*4, 3), dtype=np.uint8)
rgb2 = np.zeros((imagesize*4, imagesize*4, 3), dtype=np.uint8)
else:
rgb = 255 * np.ones((imagesize*4, imagesize*4, 3), dtype=np.uint8)
rgb2 = 255 * np.ones((imagesize*4, imagesize*4, 3), dtype=np.uint8)
# Step-1: Find vertices
# Step-1 (a): Find vertices through local minima detection.
vertexness = imagegraph[:,:,0].reshape((imagesize, imagesize))
kp = np.copy(vertexness)
smooth_kp = scipy.ndimage.filters.gaussian_filter(np.copy(kp), 1)
smooth_kp = smooth_kp / max(np.amax(smooth_kp), 0.001)
import numpy as np
import math
def calcmagForce():
diameter = input("Input diameter (cm): ")
current = input("Input the Current (A): ")
diameter = float(diameter)
current = float(current)
magneticFieldI = input("Magnetic Field i component: ")
magneticFieldJ = input("Magnetic Field j component: ")
vectorI = input("Input i component of vector: ")
vectorJ = input("Input j component of vector: ")
magneticFieldI = float(magneticFieldI)
magneticFieldJ = float(magneticFieldJ)
vectorI = float(vectorI)
vectorJ = float(vectorJ)
modifier = current * math.pi * pow((diameter / 200), 2)
vector = np.array([vectorI * modifier, vectorJ * modifier])
magneticField = np.array([magneticFieldI,magneticFieldJ])
BxV = np.dot(magneticField, vector)
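# Hedged note: the variable name BxV suggests a cross product (as in the
# magnetic force/torque formulas, e.g. F = IL x B), while the line above
# computes a dot product as in the original script. For 2-D vectors numpy's
# cross product returns the scalar z-component; the demo below contrasts
# the two operations on illustrative values.
def _demo_vector_products():
    B = np.array([0.0, 2.0])
    v = np.array([3.0, 0.0])
    print(np.dot(B, v))    # 0.0 (projection)
    print(np.cross(v, B))  # 6.0 (z-component of v x B)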
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 17 15:58:22 2020
@author: vivek
"""
### statsmodels vs sklearn
# both packages are frequently tagged with python, statistics, and data-analysis
# differences between them highlight what each in particular has to offer:
# scikit-learn’s other popular topics are machine-learning and data-science;
# StatsModels are econometrics, generalized-linear-models, timeseries-analysis, and regression-models
### Introduction
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Example 1
# Load data
dat = sm.datasets.get_rdataset("Guerry", "HistData").data
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# Example 2
# Generate artificial data (2 regressors + constant)
X = np.random.random((100, 2))
X = sm.add_constant(X)
beta = [1, .1, .5]
e = np.random.random(100)
y = np.dot(X, beta) + e
# Fit regression model
results = sm.OLS(y, X).fit()
# Inspect the results
print(results.summary())
### Getting started
# very simple case-study is designed to get you up-and-running quickly with statsmodels
import statsmodels.api as sm
import pandas
from patsy import dmatrices # patsy is a Python library for describing statistical models and building Design Matrices using R-like formulas
# import the Guerry dataset, a collection of historical data used in support of André-Michel Guerry's 1833 Essay on the Moral Statistics of France
df = sm.datasets.get_rdataset("Guerry", "HistData").data
# select specific columns from the dataset
vars = ['Department', 'Lottery', 'Literacy', 'Wealth', 'Region']
df = df[vars]
df = df.dropna() # eliminate missing values (represented by NaN in a dataframe)
df[-5:] # returns last 5 rows of data
# We want to know whether literacy rates in the 86 French departments are
# associated with per capita wagers on the Royal Lottery in the 1820s
# methodology
# We need to control for the level of wealth in each department
# we also want to include a series of dummy variables on the right-hand side of our regression equation to control for unobserved heterogeneity due to regional effects
# model is estimated using ordinary least squares regression (OLS)
# To fit most of the models covered by statsmodels, you will need to create two design matrices
# endog - is a matrix of endogenous variable(s) (i.e. dependent, response, regressand, etc.)
# exog - is a matrix of exogenous variable(s) (i.e. independent, predictor, regressor, etc.)
y, X = dmatrices('Lottery ~ Literacy + Wealth + Region', data=df, return_type='dataframe')
# dmatrices has
# split the categorical Region variable into a set of indicator variables.
# added a constant to the exogenous regressors matrix.
# returned pandas DataFrames instead of simple numpy arrays.
# patsy determined that elements of Region were text strings, so it treated Region as a categorical variable. patsy’s default is also to include an intercept, so we automatically dropped one of the Region categories.
# Fitting a model in statsmodels typically involves 3 easy steps
mod = sm.OLS(y, X) # Use the model class to describe the model
res = mod.fit() # Fit the model using a class method
print(res.summary()) # Inspect the results using a summary method
# res object has many useful attributes
res.params
res.rsquared
dir(res) # for a full list of attributes.
# Diagnostics and specification tests
# Rainbow test for linearity (the null hypothesis is that the relationship is properly modelled as linear):
sm.stats.linear_rainbow(res) # returns (test statistic based on the F test, pvalue of the test)
print(sm.stats.linear_rainbow.__doc__) # use this to interpret the output
# we can draw a plot of partial regression for a set of regressors by
sm.graphics.plot_partregress('Lottery', 'Wealth', ['Region', 'Literacy'],
data=df, obs_labels=False)
# Alternatively we can use seaborn
import seaborn as sns
sns.lmplot(data=df, y="Lottery", x="Wealth")  # lmplot has no z_score argument; optionally add hue="Region"
### statsmodels is using endog and exog as names for the data, the observed variables that are used in an estimation problem. A mnemonic hint to keep the two terms apart is that exogenous has an “x”, as in x-variable, in its name.
# endogenous: caused by factors within the system
# exogenous: caused by factors outside the system
### API Import for interactive use
import statsmodels.api as sm
dir(sm)
dir(sm.graphics)
dir(sm.tsa)
##############################################################################
# https://www.statsmodels.org/stable/user-guide.html
# https://online.stat.psu.edu/statprogram/
##############################################################################
### Linear Regression
# Linear models with independently and identically distributed errors,
# and for errors with heteroscedasticity or autocorrelation
# this module allows estimation by
# ordinary least squares (OLS),
# weighted least squares (WLS),
# generalized least squares (GLS), and
# feasible generalized least squares with autocorrelated AR(p) errors.
# Load modules and data
import numpy as np
import statsmodels.api as sm
spector_data = sm.datasets.spector.load(as_pandas=False)
spector_data.exog = sm.add_constant(spector_data.exog, prepend=False)
# Fit and summarize OLS model
mod = sm.OLS(spector_data.endog, spector_data.exog)
res = mod.fit()
print(res.summary())
# OLS is a special case of WLS where all weights are 1
# Ordinary Least Squares
# Artificial data:
c1=np.ones(100) # a column of 100 1s
c2 = np.linspace(0, 10, 100) # a col of 100 evenly spaced numbers between 10-100
c3 = c2**2 # a col with elements which are square of elements in c1
X = np.column_stack((c1, c2, c3)) # stack 1-D arrays as columns to get a single 2-D array
beta = np.array([1, 0.1, 10]) # beta is the coefficient estimated by regression
e = np.random.normal(size=100)
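# Mirroring "Example 2" near the top of this script, the artificial-data OLS
# case would typically continue as follows (hedged sketch of the missing tail):
y = np.dot(X, beta) + e
results = sm.OLS(y, X).fit()
print(results.summary())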
'''
Code adapted from: https://github.com/ssudholt/phocnet
'''
import logging
import numpy as np
import pdb
def get_most_common_n_grams(words, num_results=50, n=2):
'''
Calculates the num_results (default 50) most common n-grams (default
bigrams, n=2) from a list of word strings.
Args:
words (list of str): List containing the word strings from which to extract the bigrams
num_results (int): Number of n-grams returned.
n (int): length of n-grams.
Returns:
most common <n>-grams
'''
ngrams = {}
for w in words:
w_ngrams = get_n_grams(w, n)
for ng in w_ngrams:
ngrams[ng] = ngrams.get(ng, 0) + 1
sorted_list = sorted(list(ngrams.items()), key=lambda x: x[1], reverse=True)
top_ngrams = sorted_list[:num_results]
return {k: i for i, (k, _) in enumerate(top_ngrams)}
def get_n_grams(word, n):
'''
Calculates list of ngrams for a given word.
Args:
word (str): Word to calculate ngrams for.
n (int): Length of the n-grams; e.g. n=2 extracts bigrams only.
Returns:
List of ngrams as strings.
'''
return [word[i:i+n] for i in range(len(word) - n + 1)]
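# Illustrative usage of get_n_grams (only substrings of length exactly n):
def _demo_get_n_grams():
    print(get_n_grams('hello', 2))  # -> ['he', 'el', 'll', 'lo']
    print(get_n_grams('hi', 3))     # -> [] (word shorter than n)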
def build_phoc(words, phoc_unigrams, unigram_levels,
bigram_levels=None, phoc_bigrams=None,
split_character=None, on_unknown_unigram='error'):
'''
Calculate Pyramidal Histogram of Characters (PHOC) descriptor (see Almazan 2014).
Args:
word (str): word to calculate descriptor for
phoc_unigrams (str): string of all unigrams to use in the PHOC
unigram_levels (list of int): the levels for the unigrams in PHOC
phoc_bigrams (list of str): list of bigrams to be used in the PHOC
bigram_levels (list of int): the levels of the bigrams in the PHOC
split_character (str): special character to split the word strings into characters
on_unknown_unigram (str): What to do if a unigram appearing in a word
is not among the supplied phoc_unigrams. Possible: 'warn', 'error'
Returns:
the PHOC for the given word
'''
# prepare output matrix
#pdb.set_trace()
logger = logging.getLogger('PHOCGenerator')
if on_unknown_unigram not in ['error', 'warn']:
raise ValueError('I don\'t know the on_unknown_unigram parameter \'%s\'' % on_unknown_unigram)
phoc_size = len(phoc_unigrams) * np.sum(unigram_levels)
if phoc_bigrams is not None:
phoc_size += len(phoc_bigrams) * np.sum(bigram_levels)
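# Hedged sketch of how the PHOC construction typically proceeds from here,
# following Almazan et al. (2014): a character at position i of an n-character
# word occupies the normalized interval [i/n, (i+1)/n), and it is assigned to
# region r of a pyramid level if that region covers at least half of the
# character's interval. The helper below is illustrative only; its name and
# exact tie-breaking are assumptions, not the original implementation.
def _phoc_region_occupancy_sketch(char_index, word_length, region, level):
    char_lo = char_index / word_length
    char_hi = (char_index + 1) / word_length
    reg_lo = region / level
    reg_hi = (region + 1) / level
    overlap = max(0.0, min(char_hi, reg_hi) - max(char_lo, reg_lo))
    return overlap / (char_hi - char_lo) >= 0.5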
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from numpy.testing import (assert_, assert_array_equal, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy.sparse import coo_matrix
from scipy.special import erf
from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
estimate_bc_jac, compute_jac_indices,
construct_global_jac, solve_bvp)
def exp_fun(x, y):
return np.vstack((y[1], y[0]))
def exp_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = 1
df_dy[1, 1] = 0
return df_dy
def exp_bc(ya, yb):
return np.hstack((ya[0] - 1, yb[0]))
def exp_bc_complex(ya, yb):
return np.hstack((ya[0] - 1 - 1j, yb[0]))
def exp_bc_jac(ya, yb):
dbc_dya = np.array([
[1, 0],
[0, 0]
])
dbc_dyb = np.array([
[0, 0],
[1, 0]
])
return dbc_dya, dbc_dyb
def exp_sol(x):
return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2))
def sl_fun(x, y, p):
return np.vstack((y[1], -p[0]**2 * y[0]))
def sl_fun_jac(x, y, p):
n, m = y.shape
df_dy = np.empty((n, 2, m))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = -p[0]**2
df_dy[1, 1] = 0
df_dp = np.empty((n, 1, m))
df_dp[0, 0] = 0
df_dp[1, 0] = -2 * p[0] * y[0]
return df_dy, df_dp
def sl_bc(ya, yb, p):
return np.hstack((ya[0], yb[0], ya[1] - p[0]))
def sl_bc_jac(ya, yb, p):
dbc_dya = np.zeros((3, 2))
dbc_dya[0, 0] = 1
dbc_dya[2, 1] = 1
dbc_dyb = np.zeros((3, 2))
dbc_dyb[1, 0] = 1
dbc_dp = np.zeros((3, 1))
dbc_dp[2, 0] = -1
return dbc_dya, dbc_dyb, dbc_dp
def sl_sol(x, p):
return np.sin(p[0] * x)
def emden_fun(x, y):
return np.vstack((y[1], -y[0]**5))
def emden_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = -5 * y[0]**4
df_dy[1, 1] = 0
return df_dy
def emden_bc(ya, yb):
return np.array([ya[1], yb[0] - (3/4)**0.5])
def emden_bc_jac(ya, yb):
dbc_dya = np.array([
[0, 1],
[0, 0]
])
dbc_dyb = np.array([
[0, 0],
[1, 0]
])
return dbc_dya, dbc_dyb
def emden_sol(x):
return (1 + x**2/3)**-0.5
def undefined_fun(x, y):
return np.zeros_like(y)
def undefined_bc(ya, yb):
return np.array([ya[0], yb[0] - 1])
def big_fun(x, y):
f = np.zeros_like(y)
f[::2] = y[1::2]
return f
def big_bc(ya, yb):
return np.hstack((ya[::2], yb[::2] - 1))
def big_sol(x, n):
y = np.ones((2 * n, x.size))
y[::2] = x
return x
def big_fun_with_parameters(x, y, p):
""" Big version of sl_fun, with two parameters.
The two differential equations represented by sl_fun are broadcast to the
number of rows of y, rotating between the parameters p[0] and p[1].
Here are the differential equations:
dy[0]/dt = y[1]
dy[1]/dt = -p[0]**2 * y[0]
dy[2]/dt = y[3]
dy[3]/dt = -p[1]**2 * y[2]
dy[4]/dt = y[5]
dy[5]/dt = -p[0]**2 * y[4]
dy[6]/dt = y[7]
dy[7]/dt = -p[1]**2 * y[6]
.
.
.
"""
f = np.zeros_like(y)
f[::2] = y[1::2]
f[1::4] = -p[0]**2 * y[::4]
f[3::4] = -p[1]**2 * y[2::4]
return f
def big_fun_with_parameters_jac(x, y, p):
# big version of sl_fun_jac, with two parameters
n, m = y.shape
df_dy = np.zeros((n, n, m))
df_dy[range(0, n, 2), range(1, n, 2)] = 1
df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2
df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2
df_dp = np.zeros((n, 2, m))
df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)]
df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)]
return df_dy, df_dp
def big_bc_with_parameters(ya, yb, p):
# big version of sl_bc, with two parameters
return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1]))
def big_bc_with_parameters_jac(ya, yb, p):
# big version of sl_bc_jac, with two parameters
n = ya.shape[0]
dbc_dya = np.zeros((n + 2, n))
dbc_dyb = np.zeros((n + 2, n))
dbc_dya[range(n // 2), range(0, n, 2)] = 1
dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1
dbc_dp = np.zeros((n + 2, 2))
dbc_dp[n, 0] = -1
dbc_dya[n, 1] = 1
dbc_dp[n + 1, 1] = -1
dbc_dya[n + 1, 3] = 1
return dbc_dya, dbc_dyb, dbc_dp
def big_sol_with_parameters(x, p):
# big version of sl_sol, with two parameters
return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x)))
def shock_fun(x, y):
eps = 1e-3
return np.vstack((
y[1],
-(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
np.pi * x * np.sin(np.pi * x)) / eps
))
def shock_bc(ya, yb):
return np.array([ya[0] + 2, yb[0]])
def shock_sol(x):
eps = 1e-3
k = np.sqrt(2 * eps)
return np.cos(np.pi * x) + erf(x / k) / erf(1 / k)
def nonlin_bc_fun(x, y):
# laplace eq.
return np.stack([y[1], np.zeros_like(x)])
def nonlin_bc_bc(ya, yb):
phiA, phipA = ya
phiC, phipC = yb
kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9
# Butler-Volmer Kinetics at Anode
hA = 0.0-phiA-0.0
iA = ioA * (np.exp(f*hA) - np.exp(-f*hA))
res0 = iA + kappa * phipA
# Butler-Volmer Kinetics at Cathode
hC = V - phiC - 1.0
iC = ioC * (np.exp(f*hC) - np.exp(-f*hC))
res1 = iC - kappa*phipC
return np.array([res0, res1])
def nonlin_bc_sol(x):
return -0.13426436116763119 - 1.1308709 * x
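# Hedged usage sketch tying the fixtures above together: solving the
# exponential problem directly with scipy's solve_bvp, as the tests below do.
def _demo_solve_bvp_exp():
    x = np.linspace(0, 1, 5)
    y = np.zeros((2, x.size))
    sol = solve_bvp(exp_fun, exp_bc, x, y)
    x_test = np.linspace(0, 1, 50)
    # max abs error vs the analytic solution; ~1e-5 or smaller (cf. test_no_params)
    print(np.max(np.abs(sol.sol(x_test)[0] - exp_sol(x_test))))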
def test_modify_mesh():
x = np.array([0, 1, 3, 9], dtype=float)
x_new = modify_mesh(x, np.array([0]), np.array([2]))
assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))
x = np.array([-6, -3, 0, 3, 6], dtype=float)
x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
def test_compute_fun_jac():
x = np.linspace(0, 1, 5)
y = np.empty((2, x.shape[0]))
y[0] = 0.01
y[1] = 0.02
p = np.array([])
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
df_dy_an = exp_fun_jac(x, y)
assert_allclose(df_dy, df_dy_an)
assert_(df_dp is None)
x = np.linspace(0, np.pi, 5)
y = np.empty((2, x.shape[0]))
y[0] = np.sin(x)
y[1] = np.cos(x)
p = np.array([1.0])
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
assert_allclose(df_dy, df_dy_an)
assert_allclose(df_dp, df_dp_an)
x = np.linspace(0, 1, 10)
y = np.empty((2, x.shape[0]))
y[0] = (3/4)**0.5
y[1] = 1e-4
p = np.array([])
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
df_dy_an = emden_fun_jac(x, y)
assert_allclose(df_dy, df_dy_an)
assert_(df_dp is None)
def test_compute_bc_jac():
ya = np.array([-1.0, 2])
yb = np.array([0.5, 3])
p = np.array([])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_(dbc_dp is None)
ya = np.array([0.0, 1])
yb = np.array([0.0, -1])
p = np.array([0.5])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_allclose(dbc_dp, dbc_dp_an)
ya = np.array([0.5, 100])
yb = np.array([-1000, 10.5])
p = np.array([])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_(dbc_dp is None)
def test_compute_jac_indices():
n = 2
m = 4
k = 2
i, j = compute_jac_indices(n, m, k)
s = coo_matrix((np.ones_like(i), (i, j))).toarray()
s_true = np.array([
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
])
assert_array_equal(s, s_true)
def test_compute_global_jac():
n = 2
m = 5
k = 1
i_jac, j_jac = compute_jac_indices(2, 5, 1)
x = np.linspace(0, 1, 5)
h = np.diff(x)
y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
p = np.array([3.0])
f = sl_fun(x, y, p)
x_middle = x[:-1] + 0.5 * h
y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
df_dy, df_dp = sl_fun_jac(x, y, p)
df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
J = J.toarray()
def J_block(h, p):
return np.array([
[h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
[0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
])
J_true = np.zeros((m * n + k, m * n + k))
for i in range(m - 1):
J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0])
J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
h**2/6 * (y[1, :-1] - y[1, 1:]))
J_true[8, 0] = 1
J_true[9, 8] = 1
J_true[10, 1] = 1
J_true[10, 10] = -1
assert_allclose(J, J_true, rtol=1e-10)
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
J = J.toarray()
assert_allclose(J, J_true, rtol=2e-8, atol=2e-8)
def test_parameter_validation():
x = [0, 1, 0.5]
y = np.zeros((2, 3))
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
x = np.linspace(0, 1, 5)
y = np.zeros((2, 4))
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
fun = lambda x, y, p: exp_fun(x, y)
bc = lambda ya, yb, p: exp_bc(ya, yb)
y = np.zeros((2, x.shape[0]))
assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
def wrong_shape_fun(x, y):
return np.zeros(3)
assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
S = np.array([[0, 0]])
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
def test_no_params():
x = np.linspace(0, 1, 5)
x_test = np.linspace(0, 1, 100)
y = np.zeros((2, x.shape[0]))
for fun_jac in [None, exp_fun_jac]:
for bc_jac in [None, exp_bc_jac]:
sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_equal(sol.x.size, 5)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
f_test = exp_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res**2, axis=0)**0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_with_params():
x = np.linspace(0, np.pi, 5)
x_test = np.linspace(0, np.pi, 100)
y = np.ones((2, x.shape[0]))
for fun_jac in [None, sl_fun_jac]:
for bc_jac in [None, sl_bc_jac]:
sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_(sol.x.size < 10)
assert_allclose(sol.p, [1], rtol=1e-4)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], sl_sol(x_test, [1]),
rtol=1e-4, atol=1e-4)
f_test = sl_fun(x_test, sol_test, [1])
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_singular_term():
x = np.linspace(0, 1, 10)
x_test = np.linspace(0.05, 1, 100)
y = np.empty((2, 10))
y[0] = (3/4)**0.5
y[1] = 1e-4
S = np.array([[0, 0], [0, -2]])
for fun_jac in [None, emden_fun_jac]:
for bc_jac in [None, emden_bc_jac]:
sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_equal(sol.x.size, 10)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 09:17:52 2019
@author: <NAME>, https://github.com/zhaofenqiang
Contact: <EMAIL>
"""
import numpy as np
from interp_numpy import resampleSphereSurf, bilinearResampleSphereSurfImg
# from utils import get_neighs_order
def get_rot_mat_zyz(z1, y2, z3):
"""
first z3, then y2, lastly z1
"""
return np.array([[np.cos(z1) * np.cos(y2) * np.cos(z3) - np.sin(z1) * np.sin(z3), -np.cos(z1) * np.cos(y2) * np.sin(z3) - np.sin(z1) * np.cos(z3), np.cos(z1) * np.sin(y2)],
[np.cos(z1) * np.sin(z3) + np.sin(z1) * np.cos(y2) * np.cos(z3), -np.sin(z1) * np.cos(y2) * np.sin(z3) + np.cos(z1) * np.cos(z3), np.sin(z1) * np.sin(y2)],
[-np.sin(y2) * np.cos(z3), np.sin(y2) * np.sin(z3), np.cos(y2)]])
def get_rot_mat_zyx(z1, y2, x3):
"""
first x3, then y2, lastly z1
"""
return np.array([[np.cos(z1) * np.cos(y2), np.cos(z1) * np.sin(y2) * np.sin(x3) - np.sin(z1) * np.cos(x3), np.sin(z1) * np.sin(x3) + np.cos(z1) * np.cos(x3) * np.sin(y2)],
[np.cos(y2) * np.sin(z1), np.cos(z1) * np.cos(x3) + np.sin(z1) * np.sin(y2) * np.sin(x3), np.cos(x3) * np.sin(z1) * np.sin(y2) - np.cos(z1) * np.sin(x3)],
[-np.sin(y2), np.cos(y2) * np.sin(x3), np.cos(y2) * np.cos(x3)]])
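# Hedged self-check (illustrative, not part of the original module): the ZYX
# helper should equal the composition Rz(z1) @ Ry(y2) @ Rx(x3), and any proper
# rotation matrix satisfies R @ R.T = I with det(R) = 1.
def _demo_rot_mat_zyx():
    z1, y2, x3 = 0.3, -0.7, 1.1
    Rz = np.array([[np.cos(z1), -np.sin(z1), 0.0], [np.sin(z1), np.cos(z1), 0.0], [0.0, 0.0, 1.0]])
    Ry = np.array([[np.cos(y2), 0.0, np.sin(y2)], [0.0, 1.0, 0.0], [-np.sin(y2), 0.0, np.cos(y2)]])
    Rx = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(x3), -np.sin(x3)], [0.0, np.sin(x3), np.cos(x3)]])
    R = get_rot_mat_zyx(z1, y2, x3)
    print(np.allclose(R, Rz @ Ry @ Rx))       # True
    print(np.allclose(R @ R.T, np.eye(3)))    # True
    print(np.isclose(np.linalg.det(R), 1.0))  # True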
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
import copy
import json
import math
import os
from absl.testing import parameterized
from keras import backend
from keras import combinations
from keras import keras_parameterized
from keras import layers
from keras import metrics
from keras import Model
from keras import testing_utils
from keras.engine import base_layer
from keras.engine import training as training_module
import numpy as np
import tensorflow.compat.v2 as tf
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasSumTest(tf.test.TestCase, parameterized.TestCase):
def test_sum(self):
with self.test_session():
m = metrics.Sum(name='my_sum')
# check config
self.assertEqual(m.name, 'my_sum')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertLen(m.variables, 1)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(tf.convert_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
# check reset_state()
m.reset_state()
self.assertEqual(self.evaluate(m.total), 0)
def test_sum_with_sample_weight(self):
m = metrics.Sum(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50)
self.assertEqual(self.evaluate(m.total), 50)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52., 4) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52., 4)
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1)
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1)
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1)
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2)
# result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2
self.assertAlmostEqual(result, 63.75, 2)
self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)
def test_sum_graph_with_placeholder(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
m = metrics.Sum()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(result, 50)
self.assertEqual(self.evaluate(m.total), 50)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(result, 52., 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52., 2)
def test_save_restore(self):
with self.test_session():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Sum()
checkpoint = tf.train.Checkpoint(sum=m)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint sum object (= 300)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(600., self.evaluate(m.result()))
# restore to a different checkpoint sum object
restore_sum = metrics.Sum()
restore_checkpoint = tf.train.Checkpoint(sum=restore_sum)
status = restore_checkpoint.restore(save_path)
restore_update = restore_sum(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(600., self.evaluate(restore_sum.result()))
class MeanTest(keras_parameterized.TestCase):
# TODO(b/120949004): Re-enable garbage collection check
# @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
@keras_parameterized.run_all_keras_modes
def test_mean(self):
m = metrics.Mean(name='my_mean')
# check config
self.assertEqual(m.name, 'my_mean')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertEqual(len(m.variables), 2)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state([
tf.convert_to_tensor(1),
tf.convert_to_tensor(5)
])
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
self.assertEqual(self.evaluate(m.count), 3)
# check reset_state()
m.reset_state()
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# Check save and restore config
m2 = metrics.Mean.from_config(m.get_config())
self.assertEqual(m2.name, 'my_mean')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, tf.float32)
self.assertEqual(len(m2.variables), 2)
@testing_utils.run_v2_only
def test_function_wrapped_reset_state(self):
m = metrics.Mean(name='my_mean')
# check reset_state in function.
@tf.function
def reset_in_fn():
m.reset_state()
return m.update_state(100)
for _ in range(5):
self.evaluate(reset_in_fn())
self.assertEqual(self.evaluate(m.count), 1)
@keras_parameterized.run_all_keras_modes
def test_mean_with_sample_weight(self):
m = metrics.Mean(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50 / 0.5)
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52 / 1.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2) # 58.54 / 5.6
self.assertEqual(result, 10.45)
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
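# --- illustrative sketch (not part of the test suite) ---
# A minimal NumPy model (not Keras internals verbatim) of the bookkeeping
# asserted above: `total` accumulates sum(values * weights), `count`
# accumulates sum(weights), and result = total / count. When the weights' rank
# is neither 0 nor equal to the values' rank, the extra trailing value dims
# are mean-reduced first (hence the 58.54 / 5.6 final state).
import numpy as np

def np_weighted_mean_update_sketch(total, count, values, sample_weight):
    values = np.asarray(values, dtype=np.float64)
    weights = np.asarray(sample_weight, dtype=np.float64)
    while weights.ndim > values.ndim and weights.shape[-1] == 1:
        weights = np.squeeze(weights, axis=-1)
    if values.ndim == weights.ndim + 1 and values.shape[-1] == 1:
        values = np.squeeze(values, axis=-1)
    if weights.ndim not in (0, values.ndim):
        values = values.mean(axis=tuple(range(weights.ndim, values.ndim)))
    weights = np.broadcast_to(weights, values.shape)
    total += np.sum(values * weights)
    count += np.sum(weights)
    return total, count, total / count

# replaying the five updates above from (0, 0) ends at total ~= 58.54,
# count = 5.6, result ~= 10.45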
@keras_parameterized.run_all_keras_modes
def test_mean_graph_with_placeholder(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
m = metrics.Mean()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
self.assertEqual(result, 50 / 0.5)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
self.assertAlmostEqual(result, 52 / 1.7, 2)
@keras_parameterized.run_all_keras_modes
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Mean()
checkpoint = tf.train.Checkpoint(mean=m)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint mean object
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(200., self.evaluate(m.result()))
# restore to a different checkpoint mean object
restore_mean = metrics.Mean()
restore_checkpoint = tf.train.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(200., self.evaluate(restore_mean.result()))
self.assertEqual(3, self.evaluate(restore_mean.count))
@keras_parameterized.run_all_keras_modes
def test_multiple_instances(self):
m = metrics.Mean()
m2 = metrics.Mean()
self.assertEqual(m.name, 'mean')
self.assertEqual(m2.name, 'mean')
self.assertEqual([v.name for v in m.variables],
testing_utils.get_expected_metric_variable_names(
['total', 'count']))
self.assertEqual([v.name for v in m2.variables],
testing_utils.get_expected_metric_variable_names(
['total', 'count'], name_suffix='_1'))
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
self.evaluate(tf.compat.v1.variables_initializer(m2.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
self.assertEqual(self.evaluate(m2([63, 10])), 36.5)
self.assertEqual(self.evaluate(m2.total), 73)
self.assertEqual(self.evaluate(m2.count), 2)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
@testing_utils.run_v2_only
def test_deepcopy_of_metrics(self):
m = metrics.Mean(name='my_mean')
m.reset_state()
m.update_state(100)
m_copied = copy.deepcopy(m)
m_copied.update_state(200)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m_copied.result()), 150)
m.reset_state()
self.assertEqual(self.evaluate(m.result()), 0)
self.assertEqual(self.evaluate(m_copied.result()), 150)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasAccuracyTest(tf.test.TestCase):
def test_accuracy(self):
acc_obj = metrics.Accuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 4/4
# Check save and restore config
a2 = metrics.Accuracy.from_config(acc_obj.get_config())
self.assertEqual(a2.name, 'my_acc')
self.assertTrue(a2.stateful)
self.assertEqual(len(a2.variables), 2)
self.assertEqual(a2.dtype, tf.float32)
# check with sample_weight
result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
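# --- illustrative sketch (not part of the test suite) ---
# Accuracy behaves as a running weighted mean of exact matches; this hedged
# NumPy helper (hypothetical name) returns one update's (total, count) deltas.
# For the two updates above: (4.0, 4.0) then (0.5, 0.7), giving 4.5 / 4.7.
import numpy as np

def np_accuracy_update_sketch(y_true, y_pred, sample_weight=None):
    matches = (np.asarray(y_true) == np.asarray(y_pred)).astype(np.float64)
    matches = matches.reshape(matches.shape[0], -1).mean(axis=-1)  # per sample
    if sample_weight is None:
        w = np.ones_like(matches)
    else:
        w = np.asarray(sample_weight, dtype=np.float64).reshape(-1)
    return float(np.sum(matches * w)), float(np.sum(w))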
def test_accuracy_ragged(self):
acc_obj = metrics.Accuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[1], [2], [3], [4]])
rt2 = tf.ragged.constant([[1], [2], [3], [4]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 4/4
# check with sample_weight
rt1 = tf.ragged.constant([[2], [1]])
rt2 = tf.ragged.constant([[2], [0]])
sw_ragged = tf.ragged.constant([[0.5], [0.2]])
result_t = acc_obj(rt1, rt2, sample_weight=sw_ragged)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_binary_accuracy(self):
acc_obj = metrics.BinaryAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_pred squeeze
update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertAlmostEqual(result, 0.75, 2) # 3/4
# check y_true squeeze
result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4/6
# check with sample_weight
result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
def test_binary_accuracy_ragged(self):
acc_obj = metrics.BinaryAccuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[1], [0]])
rt2 = tf.ragged.constant([[1], [0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# y_true squeeze is only supported for dense tensors; ragged tensors have
# mismatched ranks here, so this raises an error.
rt1 = tf.ragged.constant([[[1], [1]]])
rt2 = tf.ragged.constant([[1], [0]])
with self.assertRaises(ValueError):
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
def test_binary_accuracy_threshold(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
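# --- illustrative sketch (not part of the test suite) ---
# The threshold simply binarizes predictions before comparison; with
# threshold=0.7, [0.9, 0.6, 0.4, 0.8] -> [1, 0, 0, 1], and two of the four
# labels [1, 1, 0, 0] match, hence 0.5. Helper name is hypothetical.
import numpy as np

def np_binary_accuracy_sketch(y_true, y_pred, threshold=0.5):
    binarized = (np.asarray(y_pred, dtype=np.float64) > threshold).astype(np.float64)
    return float(np.mean(np.asarray(y_true, dtype=np.float64) == binarized))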
def test_binary_accuracy_threshold_ragged(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
rt1 = tf.ragged.constant([[1], [1], [0], [0]])
rt2 = tf.ragged.constant([[0.9], [0.6], [0.4], [0.8]])
result_t = acc_obj(rt1, rt2)
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_categorical_accuracy(self):
acc_obj = metrics.CategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_categorical_accuracy_ragged(self):
acc_obj = metrics.CategoricalAccuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
rt1 = tf.ragged.constant([[0, 0, 1], [0, 1, 0]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0, 0.95]])
sample_weight = tf.ragged.constant([[0.5], [0.2]])
with self.assertRaises(tf.errors.InvalidArgumentError):
result_t = acc_obj(rt1, rt2, sample_weight)
result = self.evaluate(result_t)
def test_sparse_categorical_accuracy(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[2], [1]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_ragged(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# verify that correct value is returned
rt1 = tf.ragged.constant([[2], [1]])
rt2 = tf.ragged.constant([[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
with self.assertRaises(tf.errors.InvalidArgumentError):
# sparse_categorical_accuracy is not supported for composite/ragged
# tensors.
update_op = acc_obj.update_state(rt1, rt2)
self.evaluate(update_op)
def test_sparse_categorical_accuracy_mismatched_dims(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
# check config
self.assertEqual(acc_obj.name, 'my_acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess:
acc_obj = metrics.SparseCategoricalAccuracy(name='my_acc')
self.evaluate(tf.compat.v1.variables_initializer(acc_obj.variables))
t = tf.compat.v1.placeholder(tf.float32)
p = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
result_t = acc_obj(t, p, w)
result = sess.run(
result_t,
feed_dict=({
t: [2, 1],
p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
w: [[0.5], [0.2]]
}))
self.assertAlmostEqual(result, 0.71, 2) # 0.5/0.7
def test_get_acc(self):
acc_fn = metrics.get('acc')
self.assertEqual(acc_fn, metrics.accuracy)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CosineSimilarityTest(tf.test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = tf.constant(self.np_y_true)
self.y_pred = tf.constant(self.np_y_pred)
def test_config(self):
cosine_obj = metrics.CosineSimilarity(
axis=2, name='my_cos', dtype=tf.int32)
self.assertEqual(cosine_obj.name, 'my_cos')
self.assertEqual(cosine_obj._dtype, tf.int32)
# Check save and restore config
cosine_obj2 = metrics.CosineSimilarity.from_config(cosine_obj.get_config())
self.assertEqual(cosine_obj2.name, 'my_cos')
self.assertEqual(cosine_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_weighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true,
self.y_pred,
sample_weight=tf.constant(sample_weight))
expected_loss = np.sum(
self.expected_loss * sample_weight) / np.sum(sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = metrics.CosineSimilarity(axis=1)
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
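# --- illustrative sketch (not part of the test suite) ---
# Mirrors setup() above: per-sample cosine similarity is the dot product of
# the L2-normalized vectors along `axis`; the metric then takes a (weighted)
# mean of these per-sample values. Helper name is hypothetical.
import numpy as np

def np_cosine_similarity_sketch(y_true, y_pred, axis=1):
    def _unit(x):
        x = np.asarray(x, dtype=np.float64)
        return x / np.maximum(np.linalg.norm(x, axis=axis, keepdims=True), 1e-12)
    return np.sum(_unit(y_true) * _unit(y_pred), axis=axis)  # one value per sample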
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanAbsoluteErrorTest(tf.test.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=tf.int32)
self.assertEqual(mae_obj.name, 'my_mae')
self.assertEqual(mae_obj._dtype, tf.int32)
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, 'my_mae')
self.assertEqual(mae_obj2._dtype, tf.int32)
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mae_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanAbsolutePercentageErrorTest(tf.test.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name='my_mape', dtype=tf.int32)
self.assertEqual(mape_obj.name, 'my_mape')
self.assertEqual(mape_obj._dtype, tf.int32)
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config())
self.assertEqual(mape_obj2.name, 'my_mape')
self.assertEqual(mape_obj2._dtype, tf.int32)
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mape_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mape_obj.result()
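# the huge magnitude comes from zero targets: |y_true| is clipped at the
# backend epsilon (~1e-7 by default, an assumption here), so each
# (y_true=0, y_pred=1) element contributes ~1e9 after the *100 factor;
# 7 such elements averaged over 20 give ~35e7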
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=tf.int32)
self.assertEqual(mse_obj.name, 'my_mse')
self.assertEqual(mse_obj._dtype, tf.int32)
# Check save and restore config
mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
self.assertEqual(mse_obj2.name, 'my_mse')
self.assertEqual(mse_obj2._dtype, tf.int32)
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredLogarithmicErrorTest(tf.test.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name='my_msle', dtype=tf.int32)
self.assertEqual(msle_obj.name, 'my_msle')
self.assertEqual(msle_obj._dtype, tf.int32)
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config())
self.assertEqual(msle_obj2.name, 'my_msle')
self.assertEqual(msle_obj2._dtype, tf.int32)
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = msle_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class HingeTest(tf.test.TestCase):
def test_config(self):
hinge_obj = metrics.Hinge(name='hinge', dtype=tf.int32)
self.assertEqual(hinge_obj.name, 'hinge')
self.assertEqual(hinge_obj._dtype, tf.int32)
# Check save and restore config
hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())
self.assertEqual(hinge_obj2.name, 'hinge')
self.assertEqual(hinge_obj2._dtype, tf.int32)
def test_unweighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables))
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# reduced metric = (0.6 + 0.4125) / 2
update_op = hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = hinge_obj.result()
self.assertAllClose(0.506, result, atol=1e-3)
def test_weighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(tf.compat.v1.variables_initializer(hinge_obj.variables))
y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
sample_weight = tf.constant([1.5, 2.])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# metric = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted metric = [0.6 * 1.5, 0.4125 * 2]
# reduced metric = (0.6 * 1.5 + 0.4125 * 2) / (1.5 + 2)
result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.493, self.evaluate(result), atol=1e-3)
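# --- illustrative sketch (not part of the test suite) ---
# Replays the arithmetic in the comments above: 0/1 labels map to -1/1,
# per-sample hinge is mean(max(0, 1 - y_true * y_pred)), and sample weights
# reweight the final mean: (0.6 * 1.5 + 0.4125 * 2) / 3.5 ~= 0.493.
# Helper name is hypothetical.
import numpy as np

def np_hinge_sketch(y_true, y_pred, sample_weight=None):
    labels = np.where(np.asarray(y_true, dtype=np.float64) <= 0, -1.0, 1.0)
    per_sample = np.maximum(
        0.0, 1.0 - labels * np.asarray(y_pred, dtype=np.float64)).mean(axis=-1)
    w = np.ones_like(per_sample) if sample_weight is None else np.asarray(sample_weight)
    return float(np.sum(per_sample * w) / np.sum(w))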
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SquaredHingeTest(tf.test.TestCase):
def test_config(self):
sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=tf.int32)
self.assertEqual(sq_hinge_obj.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj._dtype, tf.int32)
# Check save and restore config
sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())
self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj2._dtype, tf.int32)
def test_unweighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables))
y_true = tf.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# reduced metric = (0.485 + 0.2431) / 2
update_op = sq_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = sq_hinge_obj.result()
self.assertAllClose(0.364, result, atol=1e-3)
def test_weighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(tf.compat.v1.variables_initializer(sq_hinge_obj.variables))
y_true = tf.constant([[-1, 1, -1, 1], [-1, -1, 1, 1]])
y_pred = tf.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
sample_weight = tf.constant([1.5, 2.])
# metric = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# metric = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted metric = [0.485 * 1.5, 0.2431 * 2]
# reduced metric = (0.485 * 1.5 + 0.2431 * 2) / (1.5 + 2)
result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.347, self.evaluate(result), atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalHingeTest(tf.test.TestCase):
def test_config(self):
cat_hinge_obj = metrics.CategoricalHinge(
name='cat_hinge', dtype=tf.int32)
self.assertEqual(cat_hinge_obj.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj._dtype, tf.int32)
# Check save and restore config
cat_hinge_obj2 = metrics.CategoricalHinge.from_config(
cat_hinge_obj.get_config())
self.assertEqual(cat_hinge_obj2.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj2._dtype, tf.int32)
def test_unweighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cat_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cat_hinge_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(tf.compat.v1.variables_initializer(cat_hinge_obj.variables))
y_true = tf.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = tf.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = tf.constant((1., 1.5, 2., 2.5))
result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RootMeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=tf.int32)
self.assertEqual(rmse_obj.name, 'rmse')
self.assertEqual(rmse_obj._dtype, tf.int32)
rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config())
self.assertEqual(rmse_obj2.name, 'rmse')
self.assertEqual(rmse_obj2._dtype, tf.int32)
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))
y_true = tf.constant((2, 4, 6))
y_pred = tf.constant((1, 3, 2))
update_op = rmse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(math.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))
y_true = tf.constant((2, 4, 6, 8))
y_pred = tf.constant((1, 3, 2, 3))
sample_weight = tf.constant((0, 1, 0, 1))
result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
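# --- illustrative sketch (not part of the test suite) ---
# RootMeanSquaredError accumulates a weighted mean of squared errors and only
# takes the square root in result(); for the weighted case above:
# ((4-3)**2 * 1 + (8-3)**2 * 1) / 2 = 13 -> sqrt(13). Helper name hypothetical.
import numpy as np

def np_rmse_sketch(y_true, y_pred, sample_weight=None):
    sq_err = (np.asarray(y_true, dtype=np.float64)
              - np.asarray(y_pred, dtype=np.float64)) ** 2
    w = (np.ones_like(sq_err) if sample_weight is None
         else np.asarray(sample_weight, dtype=np.float64))
    return float(np.sqrt(np.sum(sq_err * w) / np.sum(w)))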
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TopKCategoricalAccuracyTest(tf.test.TestCase):
def test_config(self):
a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=tf.int32)
self.assertEqual(a_obj.name, 'topkca')
self.assertEqual(a_obj._dtype, tf.int32)
a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
self.assertEqual(a_obj2.name, 'topkca')
self.assertEqual(a_obj2._dtype, tf.int32)
def test_correctness(self):
a_obj = metrics.TopKCategoricalAccuracy()
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([[0, 0, 1], [0, 1, 0]])
y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both samples match
# With `k` < 5.
a_obj = metrics.TopKCategoricalAccuracy(k=1)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_true = tf.constant([[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0]])
y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.TopKCategoricalAccuracy(k=6)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.TopKCategoricalAccuracy(k=2)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = tf.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SparseTopKCategoricalAccuracyTest(tf.test.TestCase):
def test_config(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(
name='stopkca', dtype=tf.int32)
self.assertEqual(a_obj.name, 'stopkca')
self.assertEqual(a_obj._dtype, tf.int32)
a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
a_obj.get_config())
self.assertEqual(a_obj2.name, 'stopkca')
self.assertEqual(a_obj2._dtype, tf.int32)
def test_correctness(self):
a_obj = metrics.SparseTopKCategoricalAccuracy()
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([2, 1])
y_pred = tf.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both samples match
# With `k` < 5.
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_pred = tf.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
def test_weighted(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(k=2)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
y_true = tf.constant([1, 0, 2])
y_pred = tf.constant([[0, 0.9, 0.1], [0, 0.9, 0.1], [0, 0.9, 0.1]])
sample_weight = tf.constant((1.0, 0.0, 1.0))
result = a_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(1.0, self.evaluate(result), atol=1e-5)
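# --- illustrative sketch (not part of the test suite) ---
# A prediction counts as correct when the true class id is among the k largest
# scores (sparse labels used here for brevity; the one-hot variant above first
# recovers the id via argmax). Helper name is hypothetical.
import numpy as np

def np_top_k_matches_sketch(y_true_ids, y_pred, k=5):
    top_k = np.argsort(-np.asarray(y_pred, dtype=np.float64), axis=-1)[:, :k]
    return np.array([t in row for t, row in zip(y_true_ids, top_k)], dtype=np.float64)

# np_top_k_matches_sketch([1, 0, 2], [[0, 0.9, 0.1]] * 3, k=2) -> [1., 0., 1.];
# with sample weights (1, 0, 1) the miss is dropped and the weighted mean is 1.0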
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LogCoshErrorTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
logcosh_obj = metrics.LogCoshError(name='logcosh', dtype=tf.int32)
self.assertEqual(logcosh_obj.name, 'logcosh')
self.assertEqual(logcosh_obj._dtype, tf.int32)
def test_unweighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))
update_op = logcosh_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = logcosh_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
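# --- illustrative sketch (not part of the test suite) ---
# The closed form in setup(), log((e**x + e**-x) / 2), is log(cosh(x)). For
# large |x| a numerically safer equivalent (an assumption for illustration,
# not necessarily what the metric uses internally) is
# |x| + log1p(e**(-2|x|)) - log(2).
import numpy as np

def np_logcosh_sketch(error):
    a = np.abs(np.asarray(error, dtype=np.float64))
    return a + np.log1p(np.exp(-2.0 * a)) - np.log(2.0)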
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class PoissonTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred))
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
poisson_obj = metrics.Poisson(name='poisson', dtype=tf.int32)
self.assertEqual(poisson_obj.name, 'poisson')
self.assertEqual(poisson_obj._dtype, tf.int32)
poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config())
self.assertEqual(poisson_obj2.name, 'poisson')
self.assertEqual(poisson_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))
update_op = poisson_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = poisson_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(tf.compat.v1.variables_initializer(poisson_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KLDivergenceTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))
self.batch_size = 2
self.expected_results = np.multiply(y_true, np.log(y_true / y_pred))
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
k_obj = metrics.KLDivergence(name='kld', dtype=tf.int32)
self.assertEqual(k_obj.name, 'kld')
self.assertEqual(k_obj._dtype, tf.int32)
k_obj2 = metrics.KLDivergence.from_config(k_obj.get_config())
self.assertEqual(k_obj2.name, 'kld')
self.assertEqual(k_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))
update_op = k_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = k_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
k_obj = metrics.KLDivergence()
self.evaluate(tf.compat.v1.variables_initializer(k_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / (1.2 + 3.4)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
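# --- illustrative sketch (not part of the test suite) ---
# KL divergence sums y_true * log(y_true / y_pred) over the last axis, so the
# weighted expectation above divides by 1.2 + 3.4 (one weight per sample)
# rather than by the broadcast element count. Helper name is hypothetical.
import numpy as np

def np_kld_sketch(y_true, y_pred, sample_weight=None):
    y_true = np.asarray(y_true, dtype=np.float64)
    per_sample = np.sum(
        y_true * np.log(y_true / np.asarray(y_pred, dtype=np.float64)), axis=-1)
    w = np.ones_like(per_sample) if sample_weight is None else np.asarray(sample_weight)
    return float(np.sum(per_sample * w) / np.sum(w))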
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanRelativeErrorTest(tf.test.TestCase):
def test_config(self):
normalizer = tf.constant([1, 3], dtype=tf.float32)
mre_obj = metrics.MeanRelativeError(normalizer=normalizer, name='mre')
self.assertEqual(mre_obj.name, 'mre')
self.assertArrayNear(self.evaluate(mre_obj.normalizer), [1, 3], 1e-1)
mre_obj2 = metrics.MeanRelativeError.from_config(mre_obj.get_config())
self.assertEqual(mre_obj2.name, 'mre')
self.assertArrayNear(self.evaluate(mre_obj2.normalizer), [1, 3], 1e-1)
def test_unweighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_y_pred - np_y_true), np_y_true))
y_pred = tf.constant(np_y_pred, shape=(1, 4), dtype=tf.float32)
y_true = tf.constant(np_y_true, shape=(1, 4))
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_weighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
sample_weight = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32)
rel_errors = np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)
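# the sample weights sum to 1, so the usual division by sum(weights) is
# omitted from the expected value below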
expected_error = np.sum(rel_errors * sample_weight)
y_pred = tf.constant(np_y_pred, dtype=tf.float32)
y_true = tf.constant(np_y_true)
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(
y_true, y_pred, sample_weight=tf.constant(sample_weight))
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_zero_normalizer(self):
y_pred = tf.constant([2, 4], dtype=tf.float32)
y_true = tf.constant([1, 3])
mre_obj = metrics.MeanRelativeError(normalizer=tf.zeros_like(y_true))
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertEqual(self.evaluate(result), 0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class IoUTest(tf.test.TestCase):
def test_config(self):
obj = metrics.IoU(
num_classes=2, target_class_ids=[1, 0], name='iou_class_1_0')
self.assertEqual(obj.name, 'iou_class_1_0')
self.assertEqual(obj.num_classes, 2)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.IoU.from_config(obj.get_config())
self.assertEqual(obj2.name, 'iou_class_1_0')
self.assertEqual(obj2.num_classes, 2)
self.assertEqual(obj2.target_class_ids, [1, 0])
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32)
y_true = tf.constant([0, 0, 1, 1])
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
obj = metrics.IoU(num_classes=2, target_class_ids=[1, 0])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.1 / (0.4 + 0.5 - 0.1) + 0.2 / (0.6 + 0.5 - 0.2)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_true = tf.constant([[0, 0], [1, 1]])
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
self.assertAllClose(
self.evaluate(obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([1], dtype=tf.float32)
y_true = tf.constant([1])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (1 + 1 - 1)) / 1
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
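# --- illustrative sketch (not part of the test suite) ---
# Replays the confusion-matrix arithmetic from the comments above: cm[t, p]
# accumulates the weight of samples with true class t predicted as p, and
# per-class IoU is tp / (pred_total + true_total - tp), averaged over the
# requested classes that have any entries. Helper name is hypothetical.
import numpy as np

def np_iou_sketch(y_true, y_pred, num_classes, target_class_ids, sample_weight=None):
    w = np.ones(len(y_true)) if sample_weight is None else np.asarray(sample_weight)
    cm = np.zeros((num_classes, num_classes))
    for t, p, wi in zip(y_true, y_pred, w):
        cm[int(t), int(p)] += wi
    tp = np.diag(cm)
    denom = cm.sum(axis=0) + cm.sum(axis=1) - tp  # pred totals + true totals - tp
    ious = [tp[c] / denom[c] for c in target_class_ids if denom[c] > 0]
    return float(np.mean(ious)) if ious else 0.0

# np_iou_sketch([0, 0, 1, 1], [0, 1, 0, 1], 2, [0, 1],
#               sample_weight=[0.2, 0.3, 0.4, 0.1])
#   -> (0.2 / 0.9 + 0.1 / 0.8) / 2, matching test_weighted above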
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BinaryIoUTest(tf.test.TestCase):
def test_config(self):
obj = metrics.BinaryIoU(
target_class_ids=[1, 0], threshold=0.1, name='iou_class_1_0')
self.assertEqual(obj.name, 'iou_class_1_0')
self.assertAlmostEqual(obj.threshold, 0.1)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.BinaryIoU.from_config(obj.get_config())
self.assertEqual(obj2.name, 'iou_class_1_0')
self.assertAlmostEqual(obj2.threshold, 0.1)
self.assertEqual(obj2.target_class_ids, [1, 0])
def test_different_thresholds_weighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[0.2, 0.4],
# [0.3, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
sample_weight = tf.constant([0.1, 0.2, 0.4, 0.3])
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[0.1+0.4, 0],
# [0.2, 0.3]]
# sum_row = [0.5, 0.5], sum_col = [0.7, 0.3], true_positives = [0.5, 0.3]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.5 / (0.5 + 0.7 - 0.5) + 0.3 / (0.5 + 0.3 - 0.3)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_different_thresholds_unweighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[2, 0],
# [1, 1]]
# sum_row = [2, 2], sum_col = [3, 1], true_positives = [2, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (2 / (2 + 3 - 2) + 1 / (2 + 1 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_true = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_pred = tf.constant([[0.1, 0.7], [0.9, 0.3]])
threshold = 0.4 # y_pred will become [[0, 1], [1, 0]]
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
# cm = [[0.2, 0.4],
# [0.1, 0.3]]
# sum_row = [0.6, 0.4], sum_col = [0.3, 0.7], true_positives = [0.2, 0.3]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.3 - 0.2) + 0.3 / (0.4 + 0.7 - 0.3)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.BinaryIoU(target_class_ids=[0, 1])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
self.assertAllClose(
self.evaluate(obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([0.6], dtype=tf.float32)
threshold = 0.5
y_true = tf.constant([1])
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = 1 / (1 + 1 - 1)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanIoUTest(tf.test.TestCase):
def test_config(self):
m_obj = metrics.MeanIoU(num_classes=2, name='mean_iou')
self.assertEqual(m_obj.name, 'mean_iou')
self.assertEqual(m_obj.num_classes, 2)
m_obj2 = metrics.MeanIoU.from_config(m_obj.get_config())
self.assertEqual(m_obj2.name, 'mean_iou')
self.assertEqual(m_obj2.num_classes, 2)
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_pred = tf.constant([0, 1, 0, 1], dtype=tf.float32)
y_true = tf.constant([0, 0, 1, 1])
sample_weight = tf.constant([0.2, 0.3, 0.4, 0.1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = tf.constant([[0, 1], [0, 1]], dtype=tf.float32)
y_true = tf.constant([[0, 0], [1, 1]])
sample_weight = tf.constant([[0.2, 0.3], [0.4, 0.1]])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)) / 2
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_zero_valid_entries(self):
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
self.assertAllClose(self.evaluate(m_obj.result()), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = tf.constant([1], dtype=tf.float32)
y_true = tf.constant([1])
m_obj = metrics.MeanIoU(num_classes=2)
self.evaluate(tf.compat.v1.variables_initializer(m_obj.variables))
result = m_obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 1 / (1 + 1 - 1)) / 1
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class OneHotIoUTest(tf.test.TestCase):
def test_unweighted(self):
y_true = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = tf.constant([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
[0.1, 0.4, 0.5]])
# y_pred will be converted to [2, 2, 0, 2]
# cm = [[0, 0, 2],
# [1, 0, 0],
# [0, 0, 1]]
# sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 / (1 + 2 - 0) + 1 / (3 + 1 - 1)) / 2
obj = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_true = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = tf.constant([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
[0.1, 0.4, 0.5]])
# y_pred will be converted to [2, 2, 0, 2]
sample_weight = [0.1, 0.2, 0.3, 0.4]
# cm = [[0, 0, 0.2+0.4],
# [0.3, 0, 0],
# [0, 0, 0.1]]
# sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
# true_positives = [0, 0, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
obj = metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class OneHotMeanIoUTest(tf.test.TestCase):
def test_unweighted(self):
y_true = tf.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
# y_true will be converted to [2, 0, 1, 0]
y_pred = tf.constant([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
[0.1, 0.4, 0.5]])
# y_pred will be converted to [2, 2, 0, 2]
# cm = [[0, 0, 2],
# [1, 0, 0],
# [0, 0, 1]]
# sum_row = [1, 0, 3], sum_col = [2, 1, 1], true_positives = [0, 0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0 + 0 + 1 / (3 + 1 - 1)) / 3
obj = metrics.OneHotMeanIoU(num_classes=3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def test_weighted(self):
y_true = tf.constant([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
])
# y_true will be converted to [2, 0, 1, 0, 0]
y_pred = tf.constant([
[0.2, 0.3, 0.5],
[0.1, 0.2, 0.7],
[0.5, 0.3, 0.1],
[0.1, 0.4, 0.5],
[0.6, 0.2, 0.2],
])
# y_pred will be converted to [2, 2, 0, 2, 0]
sample_weight = [0.1, 0.2, 0.3, 0.3, 0.1]
# cm = [[0.1, 0, 0.2+0.3],
# [0.3, 0, 0],
# [0, 0, 0.1]]
# sum_row = [0.4, 0, 0.6], sum_col = [0.6, 0.3, 0.1]
# true_positives = [0.1, 0, 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (0.1 / (0.4 + 0.6 - 0.1) + 0 + 0.1 /
(0.6 + 0.1 - 0.1)) / 3
obj = metrics.OneHotMeanIoU(num_classes=3)
self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
class MeanTensorTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_config(self):
with self.test_session():
m = metrics.MeanTensor(name='mean_by_element')
# check config
self.assertEqual(m.name, 'mean_by_element')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertEmpty(m.variables)
with self.assertRaisesRegex(ValueError, 'does not have any value yet'):
m.result()
self.evaluate(m([[3], [5], [3]]))
self.assertAllEqual(m._shape, [3, 1])
m2 = metrics.MeanTensor.from_config(m.get_config())
self.assertEqual(m2.name, 'mean_by_element')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, tf.float32)
self.assertEmpty(m2.variables)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_unweighted(self):
with self.test_session():
m = metrics.MeanTensor(dtype=tf.float64)
# check __call__()
self.assertAllClose(self.evaluate(m([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state([
tf.convert_to_tensor(1),
tf.convert_to_tensor(5)
])
self.evaluate(update_op)
self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5])
self.assertAllClose(self.evaluate(m.total), [101, 45])
self.assertAllClose(self.evaluate(m.count), [2, 2])
# check reset_state()
m.reset_state()
self.assertAllClose(self.evaluate(m.total), [0, 0])
self.assertAllClose(self.evaluate(m.count), [0, 0])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weighted(self):
with self.test_session():
m = metrics.MeanTensor(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
# check scalar weight
result_t = m([100, 30], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [100, 30])
self.assertAllClose(self.evaluate(m.total), [50, 15])
self.assertAllClose(self.evaluate(m.count), [0.5, 0.5])
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2)
self.assertAllClose(self.evaluate(m.total), [51, 16])
self.assertAllClose(self.evaluate(m.count), [1.5, 0.7])
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2])
self.assertAllClose(self.evaluate(m.total), [51.5, 17])
self.assertAllClose(self.evaluate(m.count), [2, 1.2])
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4])
self.assertAllClose(self.evaluate(m.total), [52.5, 18])
self.assertAllClose(self.evaluate(m.count), [3, 1.4])
# check weights expand
m = metrics.MeanTensor(dtype=tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAllClose(self.evaluate(result_t), [[1], [5]])
self.assertAllClose(self.evaluate(m.total), [[1], [1]])
self.assertAllClose(self.evaluate(m.count), [[1], [0.2]])
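# Minimal numpy sketch of the total/count bookkeeping exercised above
# (an illustration, not the Keras implementation):
total, count = np.zeros(2), np.zeros(2)
for values, w in [([100, 30], [0.5, 0.5]), ([1, 5], [1.0, 0.2])]:
  total += np.asarray(values, dtype=float) * np.asarray(w)
  count += np.asarray(w)
result = total / count  # [51 / 1.5, 16 / 0.7], matching the asserts above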
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_invalid_value_shape(self):
m = metrics.MeanTensor(dtype=tf.float64)
m([1])
with self.assertRaisesRegex(
ValueError, 'MeanTensor input values must always have the same shape'):
m([1, 5])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_build_in_tf_function(self):
"""Ensure that variables are created correctly in a tf function."""
m = metrics.MeanTensor(dtype=tf.float64)
@tf.function
def call_metric(x):
return m(x)
with self.test_session():
self.assertAllClose(self.evaluate(call_metric([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21])
@combinations.generate(combinations.combine(mode=['eager']))
def test_in_keras_model(self):
class ModelWithMetric(Model):
def __init__(self):
super(ModelWithMetric, self).__init__()
self.dense1 = layers.Dense(
3, activation='relu', kernel_initializer='ones')
self.dense2 = layers.Dense(
1, activation='sigmoid', kernel_initializer='ones')
self.mean_tensor = metrics.MeanTensor()
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
self.mean_tensor(self.dense1.kernel)
return x
model = ModelWithMetric()
model.compile(
loss='mae',
optimizer='rmsprop',
run_eagerly=True)
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y, batch_size=50)
self.assertAllClose(self.evaluate(model.mean_tensor.result()),
np.ones((4, 3)))
self.assertAllClose(self.evaluate(model.mean_tensor.total),
np.full((4, 3), 2))
self.assertAllClose(self.evaluate(model.mean_tensor.count),
np.full((4, 3), 2))
model.evaluate(x, y, batch_size=25)
self.assertAllClose(self.evaluate(model.mean_tensor.result()),
np.ones((4, 3)))
self.assertAllClose(self.evaluate(model.mean_tensor.total),
np.full((4, 3), 4))
self.assertAllClose(self.evaluate(model.mean_tensor.count),
np.full((4, 3), 4))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BinaryCrossentropyTest(tf.test.TestCase):
def test_config(self):
bce_obj = metrics.BinaryCrossentropy(
name='bce', dtype=tf.int32, label_smoothing=0.2)
self.assertEqual(bce_obj.name, 'bce')
self.assertEqual(bce_obj._dtype, tf.int32)
old_config = bce_obj.get_config()
self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)
# Check save and restore config
bce_obj2 = metrics.BinaryCrossentropy.from_config(old_config)
self.assertEqual(bce_obj2.name, 'bce')
self.assertEqual(bce_obj2._dtype, tf.int32)
new_config = bce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
result = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Reduced metric = 7.665 / 2
self.assertAllClose(self.evaluate(result), 3.833, atol=1e-3)
def test_unweighted_with_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
y_pred = tf.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
result = bce_obj(y_true, y_pred)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced metric = (0 + 66.666) / 2
self.assertAllClose(self.evaluate(result), 33.333, atol=1e-3)
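# Standalone numpy check of the from_logits formula quoted in the comment
# above (illustration only):
x = np.array([[100.0, -100.0, 100.0], [100.0, 100.0, -100.0]])
z = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
per_element = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
per_sample = per_element.mean(axis=-1)  # [0.0, 66.666]
reduced = per_sample.mean()  # 33.333, matching the assert above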
def test_weighted(self):
bce_obj = metrics.BinaryCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = tf.constant([1.5, 2.])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Metric = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [(0 + 15.33) / 2, (0 + 0) / 2]
# Weighted metric = [7.665 * 1.5, 0]
# Reduced metric = 7.665 * 1.5 / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 3.285, atol=1e-3)
def test_weighted_from_logits(self):
bce_obj = metrics.BinaryCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
y_true = tf.constant([[1, 0, 1], [0, 1, 1]])
y_pred = tf.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
sample_weight = tf.constant([2., 2.5])
result = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# Metric = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted metric = [0, 66.666 * 2.5]
# Reduced metric = 66.666 * 2.5 / (2 + 2.5)
self.assertAllClose(self.evaluate(result), 37.037, atol=1e-3)
def test_label_smoothing(self):
logits = tf.constant(((100., -100., -100.)))
y_true = tf.constant(((1, 0, 1)))
label_smoothing = 0.1
# Metric: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
# After label smoothing, label 1 becomes 1 - 0.5L
# label 0 becomes 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = metrics.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
self.evaluate(tf.compat.v1.variables_initializer(bce_obj.variables))
result = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAllClose(expected_value, self.evaluate(result), atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
cce_obj = metrics.CategoricalCrossentropy(
name='cce', dtype=tf.int32, label_smoothing=0.2)
self.assertEqual(cce_obj.name, 'cce')
self.assertEqual(cce_obj._dtype, tf.int32)
old_config = cce_obj.get_config()
self.assertAllClose(old_config['label_smoothing'], 0.2, 1e-3)
# Check save and restore config
cce_obj2 = metrics.CategoricalCrossentropy.from_config(old_config)
self.assertEqual(cce_obj2.name, 'cce')
self.assertEqual(cce_obj2._dtype, tf.int32)
new_config = cce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = cce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Reduced metric = (0.051 + 2.302) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = cce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
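# Equivalent numpy evaluation of the softmax cross-entropy derived above
# (illustration only):
logits_np = np.array([[1.0, 9.0, 0.0], [1.0, 8.0, 1.0]])
labels_np = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
softmax = np.exp(logits_np) / np.exp(logits_np).sum(axis=-1, keepdims=True)
xent = -(labels_np * np.log(softmax)).sum(axis=-1)  # [0.00045, 7.00182]
reduced_xent = xent.mean()  # 3.5011, matching the assert above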
def test_weighted(self):
cce_obj = metrics.CategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.])
result = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# Metric = -sum(y * log(y'), axis = -1)
# = -((log 0.95), (log 0.1))
# = [0.051, 2.302]
# Weighted metric = [0.051 * 1.5, 2.302 * 2.]
# Reduced metric = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
cce_obj = metrics.CategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = tf.constant([1.5, 2.])
result = cce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_label_smoothing(self):
y_true = np.asarray([[0, 1, 0], [0, 0, 1]])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
label_smoothing = 0.1
# Label smoothing: z' = z * (1 - L) + L/n,
# where L = label smoothing value and n = num classes
# Label value 1 becomes: 1 - L + L/n
# Label value 0 becomes: L/n
# y_true with label_smoothing = [[0.0333, 0.9333, 0.0333],
# [0.0333, 0.0333, 0.9333]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# xent = -sum(labels * log(softmax), 1)
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# labels * log(softmax) = [[-0.26641, -0.00042, -0.29971],
# [-0.23316, -0.00006, -6.53479]]
# xent = [0.56654, 6.76801]
# Reduced xent = (0.56654 + 6.76801) / 2
cce_obj = metrics.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
self.evaluate(tf.compat.v1.variables_initializer(cce_obj.variables))
loss = cce_obj(y_true, logits)
self.assertAllClose(self.evaluate(loss), 3.667, atol=1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SparseCategoricalCrossentropyTest(tf.test.TestCase):
def test_config(self):
scce_obj = metrics.SparseCategoricalCrossentropy(
name='scce', dtype=tf.int32)
self.assertEqual(scce_obj.name, 'scce')
self.assertEqual(scce_obj.dtype, tf.int32)
old_config = scce_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config
scce_obj2 = metrics.SparseCategoricalCrossentropy.from_config(old_config)
self.assertEqual(scce_obj2.name, 'scce')
self.assertEqual(scce_obj2.dtype, tf.int32)
new_config = scce_obj2.get_config()
self.assertDictEqual(old_config, new_config)
def test_unweighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = scce_obj(y_true, logits)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# exp(logits) = [[2.718, 8103.084, 1], [2.718, 2980.958, 2.718]]
# sum(exp(logits), axis=-1) = [8106.802, 2986.394]
# softmax = [[0.00033, 0.99954, 0.00012], [0.00091, 0.99817, 0.00091]]
# log(softmax) = [[-8.00045, -0.00045, -9.00045],
# [-7.00182, -0.00182, -7.00182]]
# y_true * log(softmax) = [[0, -0.00045, 0], [0, 0, -7.00182]]
# xent = [0.00045, 7.00182]
# Reduced xent = (0.00045 + 7.00182) / 2
self.assertAllClose(self.evaluate(result), 3.5011, atol=1e-3)
def test_weighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = tf.constant([1.5, 2.])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# logits = log(y`) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# sum(exp(logits), axis=-1) = [1, 1]
# softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
# log(softmax) = [[-2.9957, -0.0513, -16.1181],
# [-2.3026, -0.2231, -2.3026]]
# y * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
# xent = [0.0513, 2.3026]
# Weighted xent = [0.051 * 1.5, 2.302 * 2.]
# Reduced xent = (0.051 * 1.5 + 2.302 * 2.) / 3.5
self.assertAllClose(self.evaluate(result), 1.338, atol=1e-3)
def test_weighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
logits = np.asarray([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = tf.constant([1.5, 2.])
result = scce_obj(y_true, logits, sample_weight=sample_weight)
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
# xent = -sum(y_true * log(softmax), 1)
# xent = [0.00045, 7.00182]
# weighted xent = [0.000675, 14.00364]
# Reduced xent = (0.000675 + 14.00364) / (1.5 + 2)
self.assertAllClose(self.evaluate(result), 4.0012, atol=1e-3)
def test_axis(self):
scce_obj = metrics.SparseCategoricalCrossentropy(axis=0)
self.evaluate(tf.compat.v1.variables_initializer(scce_obj.variables))
y_true = np.asarray([1, 2])
y_pred = np.asarray([[0.05, 0.1], [0.95, 0.8], [0, 0.1]])
result = scce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# logits = log(y`) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# softmax = exp(logits) / sum(exp(logits), axis=-1)
# y = one_hot(y) = [[0, 0], [1, 0], [0, 1]]
# xent = -sum(y * log(softmax), 1)
# exp(logits) = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# sum(exp(logits)) = [1, 1]
# softmax = [[0.05, 0.1], [0.95, 0.8], [EPSILON, 0.1]]
# log(softmax) = [[-2.9957, -2.3026],
# [-0.0513, -0.2231],
# [-16.1181, -2.3026]]
# y * log(softmax) = [[0, 0], [-0.0513, 0], [0, -2.3026]]
# xent = [0.0513, 2.3026]
# Reduced xent = (0.0513 + 2.3026) / 2
self.assertAllClose(self.evaluate(result), 1.176, atol=1e-3)
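# Note: the axis=0 case above is the default case with the class axis moved
# last (illustrative numpy, not the metric's internals):
y_pred_axis0 = np.array([[0.05, 0.1], [0.95, 0.8], [0.0, 0.1]])
y_pred_last = np.moveaxis(y_pred_axis0, 0, -1)  # rows of test_unweighted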
class BinaryTruePositives(metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(
tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, dtype=self.dtype)
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
class BinaryTruePositivesViaControlFlow(metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositivesViaControlFlow, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if y_true[i][j] and y_pred[i][j]:
if sample_weight is None:
self.true_positives.assign_add(1)
else:
self.true_positives.assign_add(sample_weight[i][0])
def result(self):
if tf.constant(True):
return self.true_positives
return 0.0
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CustomMetricsTest(tf.test.TestCase):
def test_config(self):
btp_obj = BinaryTruePositives(name='btp', dtype=tf.int32)
self.assertEqual(btp_obj.name, 'btp')
self.assertEqual(btp_obj.dtype, tf.int32)
# Check save and restore config
btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config())
self.assertEqual(btp_obj2.name, 'btp')
self.assertEqual(btp_obj2.dtype, tf.int32)
def test_unweighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))
y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
update_op = btp_obj.update_state(y_true, y_pred) # pylint: disable=assignment-from-no-return
self.evaluate(update_op)
result = btp_obj.result()
self.assertEqual(7, self.evaluate(result))
def test_weighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))
y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]])
result = btp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(12, self.evaluate(result))
def test_autograph(self):
metric = BinaryTruePositivesViaControlFlow()
self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
y_true = tf.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = tf.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
sample_weight = tf.constant([[1.], [1.5], [2.], [2.5]])
@tf.function
def compute_metric(y_true, y_pred, sample_weight):
metric(y_true, y_pred, sample_weight)
return metric.result()
result = compute_metric(y_true, y_pred, sample_weight)
self.assertEqual(12, self.evaluate(result))
def test_metric_wrappers_autograph(self):
def metric_fn(y_true, y_pred):
x = tf.constant(0.0)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if tf.equal(y_true[i][j], y_pred[i][j]) and y_true[i][j] > 0:
x += 1.0
return x
mean_metric = metrics.MeanMetricWrapper(metric_fn)
sum_metric = metrics.SumOverBatchSizeMetricWrapper(metric_fn)
self.evaluate(tf.compat.v1.variables_initializer(mean_metric.variables))
self.evaluate(tf.compat.v1.variables_initializer(sum_metric.variables))
y_true = tf.constant([[0, 0, 0, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[1, 1, 1, 0, 1]])
y_pred = tf.constant([[0, 0, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 1, 1, 1, 1]])
@tf.function
def tf_functioned_metric_fn(metric, y_true, y_pred):
return metric(y_true, y_pred)
metric_result = tf_functioned_metric_fn(mean_metric, y_true, y_pred)
self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)
metric_result = tf_functioned_metric_fn(sum_metric, y_true, y_pred)
self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)
def test_metric_not_tracked_as_sublayer_in_layer(self):
class MyLayer(base_layer.Layer):
def __init__(self, **kwargs):
super(MyLayer, self).__init__(**kwargs)
self.mean_obj = metrics.Mean(name='my_mean_obj')
def call(self, x):
self.add_metric(
tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor')
self.add_metric(self.mean_obj(x))
return x
layer = MyLayer()
x = np.ones((1, 1))
layer(x)
self.assertLen(list(layer._flatten_layers(include_self=False)), 0)
self.assertLen(layer.metrics, 2)
def test_metric_not_tracked_as_sublayer_in_model(self):
class MyModel(training_module.Model):
def __init__(self, **kwargs):
super(MyModel, self).__init__(**kwargs)
self.mean_obj = metrics.Mean(name='my_mean_obj')
def call(self, x):
self.add_metric(
tf.reduce_sum(x), aggregation='mean', name='my_mean_tensor')
self.add_metric(self.mean_obj(x))
return x
model = MyModel()
x = np.ones((1, 1))
model(x)
self.assertLen(list(model._flatten_layers(include_self=False)), 0)
self.assertLen(model.layers, 0)
self.assertLen(model.metrics, 2)
def test_invalid_custom_metric_class_error_msg(self):
x = layers.Input(shape=(2,))
y = layers.Dense(3)(x)
model = training_module.Model(x, y)
class BadMetric(metrics.Metric):
def update_state(self, y_true, y_pred, sample_weight=None):
return
def result(self):
return
with self.assertRaisesRegex(RuntimeError,
'can only be a single'):
model.compile('sgd',
'mse',
metrics=[BadMetric()])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
def test_invalid_custom_metric_fn_error_msg(self):
x = layers.Input(shape=(2,))
y = layers.Dense(3)(x)
model = training_module.Model(x, y)
def bad_metric(y_true, y_pred, sample_weight=None): # pylint: disable=unused-argument
return None
def dict_metric(y_true, y_pred, sample_weight=None): # pylint: disable=unused-argument
return {'value': 0.}
with self.assertRaisesRegex(RuntimeError,
'The output of a metric function can only be'):
model.compile('sgd',
'mse',
metrics=[bad_metric])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
with self.assertRaisesRegex(RuntimeError,
'To return a dict of values, implement'):
model.compile('sgd',
'mse',
metrics=[dict_metric])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
def _get_model(compile_metrics):
model_layers = [
layers.Dense(3, activation='relu', kernel_initializer='ones'),
layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss='mae',
metrics=compile_metrics,
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ResetStatesTest(keras_parameterized.TestCase):
def test_reset_state_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
def test_reset_state_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
def test_reset_state_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
def test_reset_state_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
def test_reset_state_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
def test_reset_state_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
def test_reset_state_sensitivity_at_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_specificity_at_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_precision_at_recall(self):
s_obj = metrics.PrecisionAtRecall(recall=0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_recall_at_precision(self):
s_obj = metrics.RecallAtPrecision(precision=0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.true_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_positives), 25.)
self.assertEqual(self.evaluate(s_obj.false_negatives), 25.)
self.assertEqual(self.evaluate(s_obj.true_negatives), 25.)
def test_reset_state_auc(self):
auc_obj = metrics.AUC(num_thresholds=3)
model = _get_model([auc_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)
def test_reset_state_auc_from_logits(self):
auc_obj = metrics.AUC(num_thresholds=3, from_logits=True)
model_layers = [layers.Dense(1, kernel_initializer='ones', use_bias=False)]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss='mae',
metrics=[auc_obj],
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.concatenate((np.ones((25, 4)), -np.ones((25, 4)), -np.ones(
(25, 4)), np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones(
(25, 1)), np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)
def test_reset_state_auc_manual_thresholds(self):
auc_obj = metrics.AUC(thresholds=[0.5])
model = _get_model([auc_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
for _ in range(2):
model.evaluate(x, y)
self.assertEqual(self.evaluate(auc_obj.true_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_positives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.false_negatives[1]), 25.)
self.assertEqual(self.evaluate(auc_obj.true_negatives[1]), 25.)
def test_reset_state_mean_iou(self):
m_obj = metrics.MeanIoU(num_classes=2)
model = _get_model([m_obj])
x = np.asarray([[0, 0, 0, 0], [1, 1, 1, 1], [1, 0, 1, 0], [0, 1, 0, 1]],
dtype=np.float32)
y = np.asarray([[0], [1], [1], [1]], dtype=np.float32)
model.evaluate(x, y)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
model.evaluate(x, y)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[0], [1, 0], 1e-1)
self.assertArrayNear(self.evaluate(m_obj.total_cm)[1], [3, 0], 1e-1)
def test_reset_state_recall_float64(self):
# Test case for GitHub issue 36790.
try:
backend.set_floatx('float64')
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.true_positives), 50.)
self.assertEqual(self.evaluate(r_obj.false_negatives), 50.)
finally:
backend.set_floatx('float32')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MergeStateTest(keras_parameterized.TestCase):
def test_merge_state_incompatible_metrics(self):
with self.assertRaisesRegex(ValueError,
'Metric .* is not compatible with .*'):
obj1 = metrics.FalsePositives()
self.evaluate(tf.compat.v1.variables_initializer(obj1.variables))
obj2 = metrics.Accuracy()
self.evaluate(tf.compat.v1.variables_initializer(obj2.variables))
self.evaluate(obj1.merge_state([obj2]))
def test_merge_state_accuracy(self):
a_objs = []
for y_true, y_pred in zip([[[1], [2]], [[3], [4]]],
[[[0], [2]], [[3], [4]]]):
a_obj = metrics.Accuracy()
a_objs.append(a_obj)
self.evaluate(tf.compat.v1.variables_initializer(a_obj.variables))
self.evaluate(a_obj.update_state(y_true, y_pred))
self.evaluate(a_objs[0].merge_state(a_objs[1:]))
self.assertEqual(self.evaluate(a_objs[0].total), 3.)
self.assertEqual(self.evaluate(a_objs[0].count), 4.)
self.assertEqual(self.evaluate(a_objs[0].result()), 0.75)
def test_merge_state_false_positives(self):
fp_objs = []
for _ in range(4):
fp_obj = metrics.FalsePositives()
fp_objs.append(fp_obj)
self.evaluate(tf.compat.v1.variables_initializer(fp_obj.variables))
y_true = np.zeros((25, 1))
y_pred = np.ones((25, 1))
self.evaluate(fp_obj.update_state(y_true, y_pred))
self.evaluate(fp_objs[0].merge_state(fp_objs[1:]))
self.assertEqual(self.evaluate(fp_objs[0].accumulator), 100.)
def test_merge_state_false_negatives(self):
fn_objs = []
for _ in range(4):
fn_obj = metrics.FalseNegatives()
fn_objs.append(fn_obj)
self.evaluate(tf.compat.v1.variables_initializer(fn_obj.variables))
y_true = np.ones((25, 1))
y_pred = np.zeros((25, 1))
self.evaluate(fn_obj.update_state(y_true, y_pred))
self.evaluate(fn_objs[0].merge_state(fn_objs[1:]))
self.assertEqual(self.evaluate(fn_objs[0].accumulator), 100.)
def test_merge_state_true_negatives(self):
tn_objs = []
for _ in range(4):
tn_obj = metrics.TrueNegatives()
tn_objs.append(tn_obj)
self.evaluate(tf.compat.v1.variables_initializer(tn_obj.variables))
y_true = np.zeros((25, 1))
y_pred = np.zeros((25, 1))
self.evaluate(tn_obj.update_state(y_true, y_pred))
self.evaluate(tn_objs[0].merge_state(tn_objs[1:]))
self.assertEqual(self.evaluate(tn_objs[0].accumulator), 100.)
def test_merge_state_true_positives(self):
tp_objs = []
for _ in range(4):
tp_obj = metrics.TruePositives()
tp_objs.append(tp_obj)
self.evaluate(tf.compat.v1.variables_initializer(tp_obj.variables))
y_true = np.ones((25, 1))
y_pred = np.ones((25, 1))
self.evaluate(tp_obj.update_state(y_true, y_pred))
self.evaluate(tp_objs[0].merge_state(tp_objs[1:]))
self.assertEqual(self.evaluate(tp_objs[0].accumulator), 100.)
def test_merge_state_precision(self):
p_objs = []
for _ in range(5):
p_obj = metrics.Precision()
p_objs.append(p_obj)
self.evaluate(tf.compat.v1.variables_initializer(p_obj.variables))
y_true = np.concatenate((np.ones((10, 1)), np.zeros((10, 1))))
y_pred = np.concatenate((np.ones((10, 1)), np.ones((10, 1))))
self.evaluate(p_obj.update_state(y_true, y_pred))
self.evaluate(p_objs[0].merge_state(p_objs[1:]))
self.assertEqual(self.evaluate(p_objs[0].true_positives), 50.)
self.assertEqual(self.evaluate(p_objs[0].false_positives), 50.)
def test_merge_state_recall(self):
r_objs = []
for _ in range(5):
r_obj = metrics.Recall()
r_objs.append(r_obj)
self.evaluate(tf.compat.v1.variables_initializer(r_obj.variables))
y_true = np.concatenate((np.ones((10, 1)), np.ones((10, 1))))
y_pred = np.concatenate((np.ones((10, 1)), np.zeros((10, 1))))
self.evaluate(r_obj.update_state(y_true, y_pred))
self.evaluate(r_objs[0].merge_state(r_objs[1:]))
self.assertEqual(self.evaluate(r_objs[0].true_positives), 50.)
self.assertEqual(self.evaluate(r_objs[0].false_negatives), 50.)
def test_merge_state_sensitivity_at_specificity(self):
sas_objs = []
for _ in range(5):
sas_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
sas_objs.append(sas_obj)
self.evaluate(tf.compat.v1.variables_initializer(sas_obj.variables))
y_true = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.ones(
(5, 1)), np.zeros((5, 1))))
y_pred = np.concatenate((np.ones((5, 1)), np.zeros((5, 1)), np.zeros(
(5, 1)), np.ones((5, 1))))
import numpy as np
import pandas as pd
import pytest
from scipy import stats
from locan import LocData
from locan.analysis import BlinkStatistics
from locan.analysis.blinking import _blink_statistics, _DistributionFits
def test__blink_statistics_0():
# frames with on- and off-periods of up to three frames, starting with a one-frame on-period.
frames = np.array([0, 4, 6, 7, 8, 12, 13])
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 1, 3, 2])
assert np.array_equal(results["off_periods"], [3, 1, 3])
assert np.array_equal(results["on_periods_frame"], [0, 4, 6, 12])
assert np.array_equal(results["off_periods_frame"], [1, 5, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0], [1], [2, 3, 4], [5, 6]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [1, 5, 2])
assert np.array_equal(results["off_periods"], [3, 3])
assert np.array_equal(results["on_periods_frame"], [0, 4, 12])
assert np.array_equal(results["off_periods_frame"], [1, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0], [1, 2, 3, 4], [5, 6]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [14])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6]])
]
)
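# Illustrative numpy sketch of how on/off periods follow from frame gaps for
# memory=0 (an independent check, not locan's implementation):
frames = np.array([0, 4, 6, 7, 8, 12, 13])
gaps = np.diff(frames)
breaks = np.nonzero(gaps > 1)[0]  # index of the last frame of each on-period
on_starts = np.insert(frames[breaks + 1], 0, frames[0])
on_ends = np.append(frames[breaks], frames[-1])
on_periods = on_ends - on_starts + 1  # [1, 1, 3, 2]
off_periods = gaps[breaks] - 1  # [3, 1, 3]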
def test__blink_statistics_1():
# frames with on- and off-periods of up to three frames, starting with a two-frame on-period.
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13])
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [1, 2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 3, 6, 12])
assert np.array_equal(results["off_periods_frame"], [2, 4, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [4, 3, 2])
assert np.array_equal(results["off_periods"], [2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 6, 12])
assert np.array_equal(results["off_periods_frame"], [4, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [14])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]
)
]
)
def test__blink_statistics_2():
# the same frames shifted by +1, so the trace starts with a one-frame off-period.
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13]) + 1
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [1, 1, 2, 3])
assert np.array_equal(results["on_periods_frame"], [1, 4, 7, 13])
assert np.array_equal(results["off_periods_frame"], [0, 3, 5, 10])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [5, 3, 2])
assert np.array_equal(results["off_periods"], [2, 3])
assert np.array_equal(results["on_periods_frame"], [0, 7, 13])
assert np.array_equal(results["off_periods_frame"], [5, 10])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [15])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]
)
]
)
def test__blink_statistics_3():
# frames with on- and off-periods of up to three frames, starting with a four-frame off-period.
frames = np.array([0, 1, 4, 6, 7, 8, 12, 13]) + 4
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [4, 2, 1, 3])
assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16])
assert np.array_equal(results["off_periods_frame"], [0, 6, 9, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 3, 2])
assert np.array_equal(results["off_periods"], [2, 1, 3])
assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16])
assert np.array_equal(results["off_periods_frame"], [6, 9, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=2, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [9, 2])
assert np.array_equal(results["off_periods"], [4, 3])
assert np.array_equal(results["on_periods_frame"], [4, 16])
assert np.array_equal(results["off_periods_frame"], [0, 13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=2, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [9, 2])
assert np.array_equal(results["off_periods"], [3])
assert np.array_equal(results["on_periods_frame"], [4, 16])
assert np.array_equal(results["off_periods_frame"], [13])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]]
)
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [18])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
assert np.array_equal(results["off_periods_frame"], [])
assert np.array_equal(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]])
def test__blink_statistics_4():
# frames with on-periods of up to two frames and off-periods of up to five, starting with a two-frame off-period.
frames = np.array([0, 1, 4, 6, 12, 13]) + 2
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 1, 2])
assert np.array_equal(results["off_periods"], [2, 2, 1, 5])
assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14])
assert np.array_equal(results["off_periods_frame"], [0, 4, 7, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]]
)
]
)
results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [2, 1, 1, 2])
assert np.array_equal(results["off_periods"], [2, 1, 5])
assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14])
assert np.array_equal(results["off_periods_frame"], [4, 7, 9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(
results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]]
)
]
)
results = _blink_statistics(frames, memory=3, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [9, 2])
assert np.array_equal(results["off_periods"], [5])
assert np.array_equal(results["on_periods_frame"], [0, 14])
assert np.array_equal(results["off_periods_frame"], [9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]])
]
)
results = _blink_statistics(frames, memory=3, remove_heading_off_periods=True)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [7, 2])
assert np.array_equal(results["off_periods"], [5])
assert np.array_equal(results["on_periods_frame"], [2, 14])
assert np.array_equal(results["off_periods_frame"], [9])
assert all(
[
np.array_equal(one, two)
for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]])
]
)
results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False)
assert len(results["on_periods"]) == len(results["on_periods_frame"])
assert len(results["off_periods"]) == len(results["off_periods_frame"])
assert np.array_equal(results["on_periods"], [16])
assert np.array_equal(results["off_periods"], [])
assert np.array_equal(results["on_periods_frame"], [0])
import psana
from psmon.plots import Image
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from psmon import publish
import numpy as np
import os
import logging
import requests
import socket
import argparse
import sys
import time
import inspect
from threading import Thread, Lock
import zmq
from mpi4py import MPI
f = '%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=f)
logger = logging.getLogger(__name__)
class MpiWorker(object):
"""This worker will collect events and do whatever
necessary processing, then send to master"""
def __init__(self, ds, detector, ipm, jet_cam, jet_cam_axis, evr, r_mask, calib_results,
event_code=40,
plot=False,
data_port=1235):
self._ds = ds # We probably need to use kwargs to make this general
self._detector = detector
self._ipm = ipm
self._jet_cam = jet_cam
self._jet_cam_axis = jet_cam_axis
self._evr = evr
self._comm = MPI.COMM_WORLD
self._rank = self._comm.Get_rank()
self._r_mask = r_mask
self._plot = plot
self._event_code = event_code
self._peak_bin = int(calib_results['peak_bin'])
self._delta_bin = int(calib_results['delta_bin'])
self._i0_thresh = [float(calib_results['i0_low']), float(calib_results['i0_high'])]
self._state = None
self._msg_thread = Thread(target=self.start_msg_thread, args=(data_port,))
self._msg_thread.start()
self._attr_lock = Lock()
print('I0 threshold: {}, {}'.format(self._i0_thresh[0], self._i0_thresh[1]))
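# Hypothetical construction sketch (argument values are placeholders, not
# taken from this module):
# worker = MpiWorker(ds, detector, ipm=(ipm_det, 'sum'), jet_cam=cam,
#                    jet_cam_axis=0, evr=evr, r_mask=masks,
#                    calib_results={'peak_bin': 50, 'delta_bin': 5,
#                                   'i0_low': 0.1, 'i0_high': 10.0})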
@property
def rank(self):
"""Worker ID"""
return self._rank
@property
def ds(self):
"""DataSource object"""
return self._ds
@property
def detector(self):
"""Detectors to get data from"""
return self._detector
@property
def comm(self):
"""MPI communicator"""
return self._comm
@property
def ipm(self):
"""IPM Detector"""
return self._ipm
@property
def evr(self):
"""EVR detector"""
return self._evr
@property
def plot(self):
"""Whether we should plot detector"""
return self._plot
@property
def event_code(self):
"""Event Code to trigger data collection on"""
return self._event_code
@property
def peak_bin(self):
return self._peak_bin
@peak_bin.setter
def peak_bin(self, peak_bin):
with self._attr_lock:
try:
self._peak_bin = int(peak_bin)
except:
logger.warning('You must provide int for peak bin')
@property
def delta_bin(self):
return self._delta_bin
@delta_bin.setter
def delta_bin(self, delta_bin):
with self._attr_lock:
try:
self._delta_bin = int(delta_bin)
except:
logger.warning('You must provide int for delta bin')
@property
def jet_cam(self):
return self._jet_cam
@property
def jet_cam_axis(self):
return self._jet_cam_axis
def start_run(self):
"""Worker should handle any calculations"""
run = next(self._ds.runs()).run()
psana_mask = self.detector.mask(int(run), calib=True, status=True, edges=True, central=False, unbond=False, unbondnbrs=False)
for evt_idx, evt in enumerate(self.ds.events()):
# Definitely not a fan of wrapping the world in a try/except
# but too many possible failure modes from the data
try:
if self.event_code not in self.evr.eventCodes(evt):
continue
with self._attr_lock:
low_bin = self.peak_bin - self.delta_bin
hi_bin = self.peak_bin + self.delta_bin
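# peak_bin/delta_bin are re-read under the lock each event so the
# message thread can retune the window between events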
# Get i0 data; this differs between ipm detector types
i0 = getattr(self.ipm[0].get(evt), self.ipm[1])()
# Filter based on i0
if i0<self._i0_thresh[0] or i0>self._i0_thresh[1]:
print(f'Bad shot: {i0}')
dropped = 1
intensity = 0
inorm = 0
else:
print(i0)
dropped = 0
# Detector images
calib = self.detector.calib(evt)
calib = calib*psana_mask
det_image = self.detector.image(evt, calib)
az_bins = np.array([np.mean(det_image[mask]) for mask in self._r_mask])
# demo inspired by: http://matplotlib.org/examples/pylab_examples/contour_demo.html
from bokeh import mpl
from bokeh.plotting import output_file, show
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
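# The referenced matplotlib demo typically continues as below; the source
# row is truncated here, so this is a reconstruction sketch rather than the
# original lines.
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10.0 * (Z2 - Z1)
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
output_file("contour.html")
show(mpl.to_bokeh())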
import numpy as np
from numba import jit,prange,set_num_threads
from scipy.special import j0,j1
from scipy.spatial import cKDTree
from astropy.cosmology import Planck15 as cosmo
from multiprocessing import Pool
from itertools import repeat
class Plane:
""" Lens Plane construct from input particles
This class constructs a lens plane from the 2D positions of particles
and calculates deflection angles and gravitational parameters for
any position in this plane using the P3M algorithm with an optimized
Green function and an adaptive softening length.
Parameters:
-----------
coor: ndarray of shape (n_particles, 2)
[x,y] coordinates of particles in the unit of kpc/h. x and y
should be in the range of 0 < x,y < box.
box: even int
Physical length of the Plane in kpc/h. Should be even for FFT.
m_p: float or ndarray of shape (n_particles,)
Mass of each particle in 10^6 Msun/h. If float, mass is the
same for all particles.
H: float, default=1.
Physical length for each grid in kpc/h. The number of grids is
simply (box/H)^2.
p: int, default=2
Mass assignment and force interpolation scheme. 1 for CIC, 2 for
TSC and 3 for PCS.
a: float, default=6.
The softening length in PM: a_pm = a*H.
fftw: bool, default=True
If True, use pyfftw for the FFTs, which can be parallelized. If
False, use numpy's FFT.
green: ndarray of shape (box/H, box/H), default=None
Green function used to solve Poisson's equation. If None,
optimized Green function is calculated automatically. If you're
building many Plane objects with the same parameters (box, H, p, a),
you're recommended to calculate and save the optimized Green func-
tion using Plane.Green function and input it directly.
core: int, default=5
Number of cores used for parallel computation.
Attributes:
------------
density_map: ndarray of shape (box/H, box/H)
Surface density for each grid after mass assignment with the
unit 10^6 h Msun/kpc^2.
PM_field_grid: ndarray of shape (2, box/H, box/H)
PM force grid used for force interpolation with the unit (km/s)^2.
PM_field_grid[0] holds the force in the x direction and PM_field_grid[1]
the force in the y direction.
"""
def __init__(self,coor,box,m_p,H=1,p=2,a=6,fftw=True,green=None,core=5):
self._box = box
m_p = np.atleast_1d(m_p)
if len(m_p) == 1:
self._m_p = np.ones(len(coor))*m_p
else:
self._m_p = m_p
self._H = H
self._p = p
self._a = a
self._core = core
self._set_numba_threads(core)
self._coor = coor
self._fftw = fftw
self._tree = cKDTree(self._coor,leafsize=40,boxsize=self._box)
self._green = green
self.density_map = self._paint(self._coor,self._box,self._m_p,self._H,self._p)
self.PM_field_grid = self._PM_grid()
def __del__(self):
pass
def _set_numba_threads(self,core):
set_num_threads(core)
def _paint(self,coor,box,m_p,H,p):
coor = coor / H
box = int(round(box / H))
x = coor[:,0]
y = coor[:,1]
if p == 1:
number = self._paint_cic(box,x,y,m_p)
if p == 2:
number = self._paint_tsc(box,x,y,m_p)
if p == 3:
number = self._paint_PCS(box,x,y,m_p)
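# dividing the assigned mass per cell by the cell area H^2 yields the
# surface density (10^6 h Msun / kpc^2) documented for density_map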
return number / H**2
@staticmethod
@jit(nopython=True)#, parallel=True)
def _paint_cic(box,x,y,m_p):
lense = box
xgrid = np.zeros((box,box))
for i in prange(len(x)):
cx = np.int64(np.ceil(x[i]))
cy = np.int64(np.ceil(y[i]))
fx = cx - 1
fy = cy - 1
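# CIC: split the particle's mass over the 4 surrounding grid points; each
# weight is the overlap between the particle's unit cell and that grid
# cell, so the two weights per axis sum to 1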
cx_w = 1 - (cx - x[i])
cy_w = 1 - (cy - y[i])
fx_w = 1 - (x[i] - fx)
fy_w = 1 - (y[i] - fy)
xgrid[cy%lense,cx%lense] += cy_w*cx_w*m_p[i]
xgrid[cy%lense,fx%lense] += cy_w*fx_w*m_p[i]
xgrid[fy%lense,cx%lense] += fy_w*cx_w*m_p[i]
xgrid[fy%lense,fx%lense] += fy_w*fx_w*m_p[i]
return xgrid
@staticmethod
@jit(nopython=True)#, parallel=True)
def _paint_tsc(box,x,y,m_p):
lense = box
xgrid = np.zeros((lense,lense))
for i in prange(len(x)):
cx = np.int64(np.ceil(x[i]))
cy = np.int64(np.ceil(y[i]))
fx = cx - 1
fy = cy - 1
if cx - x[i] < 0.5:
ax = cx + 1
cx_w = 0.75 - (cx - x[i])**2
ax_w = 0.5 * (1.5 - ax + x[i])**2
fx_w = 0.5 * (1.5 - x[i] + fx)**2
else:
ax = fx - 1
cx_w = 0.5 * (1.5 - cx + x[i])**2
ax_w = 0.5 * (1.5 - x[i] + ax)**2
fx_w = 0.75 - (x[i] - fx)**2
if cy - y[i] < 0.5:
ay = cy + 1
cy_w = 0.75 - (cy - y[i])**2
ay_w = 0.5 * (1.5 - ay + y[i])**2
fy_w = 0.5 * (1.5 - y[i] + fy)**2
else:
ay = fy - 1
cy_w = 0.5 * (1.5 - cy + y[i])**2
ay_w = 0.5 * (1.5 - y[i] + ay)**2
fy_w = 0.75 - (y[i] - fy)**2
xgrid[cy%lense,cx%lense] += cy_w*cx_w*m_p[i]
xgrid[cy%lense,fx%lense] += cy_w*fx_w*m_p[i]
xgrid[fy%lense,cx%lense] += fy_w*cx_w*m_p[i]
xgrid[fy%lense,fx%lense] += fy_w*fx_w*m_p[i]
xgrid[cy%lense,ax%lense] += cy_w*ax_w*m_p[i]
xgrid[fy%lense,ax%lense] += fy_w*ax_w*m_p[i]
xgrid[ay%lense,cx%lense] += ay_w*cx_w*m_p[i]
xgrid[ay%lense,fx%lense] += ay_w*fx_w*m_p[i]
xgrid[ay%lense,ax%lense] += ay_w*ax_w*m_p[i]
return xgrid
@staticmethod
@jit(nopython=True)
    def _paint_PCS(box,x,y,m_p):
lense = box
xgrid = np.zeros((lense,lense))
for i in prange(len(x)):
cx = np.int64(np.ceil(x[i]))
cy = np.int64(np.ceil(y[i]))
fx = cx - 1
fy = cy - 1
acx = cx + 1
acy = cy + 1
afx = fx - 1
afy = fy - 1
cx_w = 1./6*(4.-6*(cx-x[i])**2+3.*(cx-x[i])**3)
cy_w = 1./6*(4.-6*(cy-y[i])**2+3.*(cy-y[i])**3)
fx_w = 1./6*(4.-6*(fx-x[i])**2+3.*(x[i]-fx)**3)
fy_w = 1./6*(4.-6*(fy-y[i])**2+3.*(y[i]-fy)**3)
acx_w = 1./6*(2-(acx-x[i]))**3
acy_w = 1./6*(2-(acy-y[i]))**3
afx_w = 1./6*(2-(x[i]-afx))**3
afy_w = 1./6*(2-(y[i]-afy))**3
xgrid[cy%lense,cx%lense] += cy_w*cx_w*m_p[i]
xgrid[cy%lense,fx%lense] += cy_w*fx_w*m_p[i]
xgrid[cy%lense,acx%lense] += cy_w*acx_w*m_p[i]
xgrid[cy%lense,afx%lense] += cy_w*afx_w*m_p[i]
xgrid[fy%lense,cx%lense] += fy_w*cx_w*m_p[i]
xgrid[fy%lense,fx%lense] += fy_w*fx_w*m_p[i]
xgrid[fy%lense,acx%lense] += fy_w*acx_w*m_p[i]
xgrid[fy%lense,afx%lense] += fy_w*afx_w*m_p[i]
xgrid[acy%lense,cx%lense] += acy_w*cx_w*m_p[i]
xgrid[acy%lense,fx%lense] += acy_w*fx_w*m_p[i]
xgrid[acy%lense,acx%lense] += acy_w*acx_w*m_p[i]
xgrid[acy%lense,afx%lense] += acy_w*afx_w*m_p[i]
xgrid[afy%lense,cx%lense] += afy_w*cx_w*m_p[i]
xgrid[afy%lense,fx%lense] += afy_w*fx_w*m_p[i]
xgrid[afy%lense,acx%lense] += afy_w*acx_w*m_p[i]
xgrid[afy%lense,afx%lense] += afy_w*afx_w*m_p[i]
return xgrid
@staticmethod
@jit(nopython=True)#,parallel=True)
    def _difference(potential,alpha,H): # alpha = 4/3 preferred
# difference
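        # f1 is the 2-point and f2 the 4-point centred difference; blending
        # with alpha = 4/3 cancels the leading O(H^2) truncation error of f1.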
f1y = np.zeros(potential.shape)
f1y[1:-1] = (potential[2:] - potential[:-2]) / (2. * H)
f1y[0] = (potential[1] - potential[0]) / H
f1y[-1] = (potential[-2] - potential[-1]) / H
f1x = np.zeros(potential.shape)
f1x[:,1:-1] = (potential[:,2:] - potential[:,:-2]) / (2. * H)
f1x[:,0] = (potential[:,1] - potential[:,0]) / H
f1x[:,-1] = (potential[:,-2] - potential[:,-1]) / H
f2y = np.zeros(potential.shape)
f2y[2:-2] = (potential[4:] - potential[:-4]) / (4. * H)
f2y[0] = (potential[2] - potential[0]) / (2. * H)
f2y[1] = (potential[3] - potential[0]) / (3. * H)
f2y[-1] = (potential[-3] - potential[-1]) / (2. * H)
f2y[-2] = (potential[-4] - potential[-1]) / (3. * H)
f2x = np.zeros(potential.shape)
f2x[:,2:-2] = (potential[:,4:] - potential[:,:-4]) / (4. * H)
f2x[:,0] = (potential[:,2] - potential[:,0]) / (2. * H)
f2x[:,1] = (potential[:,3] - potential[:,0]) / (3. * H)
f2x[:,-1] = (potential[:,-3] - potential[:,-1]) / (2. * H)
f2x[:,-2] = (potential[:,-4] - potential[:,-1]) / (3. * H)
return alpha * np.stack((f1x,f1y)) + (1. - alpha) * np.stack((f2x,f2y))
def _PM_grid(self):
# calculate force on grid
if self._green is None:
gk, kx, ky = Green(self._box, self._H, self._p, self._a, self._core)
else:
gk = self._green
if self._fftw == False:
sigmak = np.fft.fft2(self.density_map)
phik = sigmak * gk
phik[0,0] = 0
phi = np.fft.ifft2(phik)
phi = phi.real
            field = -1.*self._difference(phi,4./3.,self._H) # (km/s)^2
else:
import pyfftw
density_pfw = pyfftw.empty_aligned(gk.shape, dtype='complex128', n=16)
            density_pfw[:] = self.density_map + 1j*0.0  # fill the aligned buffer in place
sigmak = pyfftw.interfaces.numpy_fft.fft2(density_pfw, threads=self._core)
phik = sigmak * gk
phik[0,0] = 0
phi = pyfftw.interfaces.numpy_fft.ifft2(phik, threads=self._core)
phi = phi.real
            field = -1.*self._difference(phi,4./3.,self._H) # (km/s)^2
return field
def PM_field(self,x,y):
"""
PM force field for required positions
Parameters:
-----------
x: ndarray of any shape
x coordinates of required positions.
y: ndarray of any shape
y coordinates of required positions.
Returns:
-----------
f: ndarray of shape (2, x.shape[0], x.shape[1])
x and y direction PM force field for required
positions in (km/s)^2.
"""
return self.__interpolate_PM_field(self.PM_field_grid,x,y,self._p,self._H)
@staticmethod
@jit(nopython=True, parallel=True)
def __interpolate_PM_field(PM_field_grid, x, y, p, H):
#interpolate grid force to whole space
xt = x / H
yt = y / H
forcex = PM_field_grid[0]
lense = forcex.shape[0]
forcey = PM_field_grid[1]
xp = xt.reshape(xt.size)
yp = yt.reshape(yt.size)
force_interx = np.zeros(xp.shape)
force_intery = np.zeros(xp.shape)
for i in prange(len(force_interx)):
cx = np.int64(np.ceil(xp[i]))
cy = np.int64(np.ceil(yp[i]))
fx = cx - 1
fy = cy - 1
if p == 1:
cx_w = 1 - (cx - xp[i])
cy_w = 1 - (cy - yp[i])
fx_w = 1 - (xp[i] - fx)
fy_w = 1 - (yp[i] - fy)
force_interx[i] = forcex[cy%lense,cx%lense]*cy_w*cx_w + forcex[cy%lense,fx%lense]*cy_w*fx_w + forcex[fy%lense,cx%lense]*fy_w*cx_w + forcex[fy%lense,fx%lense]*fy_w*fx_w
force_intery[i] = forcey[cy%lense,cx%lense]*cy_w*cx_w + forcey[cy%lense,fx%lense]*cy_w*fx_w + forcey[fy%lense,cx%lense]*fy_w*cx_w + forcey[fy%lense,fx%lense]*fy_w*fx_w
if p == 2:
if cx - xp[i] < 0.5:
ax = cx + 1
cx_w = 0.75 - (cx - xp[i])**2
ax_w = 0.5 * (1.5 - ax + xp[i])**2
fx_w = 0.5 * (1.5 - xp[i] + fx)**2
else:
ax = fx - 1
cx_w = 0.5 * (1.5 - cx + xp[i])**2
ax_w = 0.5 * (1.5 - xp[i] + ax)**2
fx_w = 0.75 - (xp[i] - fx)**2
if cy - yp[i] < 0.5:
ay = cy + 1
cy_w = 0.75 - (cy - yp[i])**2
ay_w = 0.5 * (1.5 - ay + yp[i])**2
fy_w = 0.5 * (1.5 - yp[i] + fy)**2
else:
ay = fy - 1
cy_w = 0.5 * (1.5 - cy + yp[i])**2
ay_w = 0.5 * (1.5 - yp[i] + ay)**2
fy_w = 0.75 - (yp[i] - fy)**2
force_interx[i] = forcex[cy%lense,cx%lense]*cy_w*cx_w + forcex[cy%lense,fx%lense]*cy_w*fx_w +\
forcex[fy%lense,cx%lense]*fy_w*cx_w + forcex[fy%lense,fx%lense]*fy_w*fx_w + forcex[cy%lense,ax%lense]*cy_w*ax_w +\
forcex[fy%lense,ax%lense]*fy_w*ax_w + forcex[ay%lense,cx%lense]*ay_w*cx_w + forcex[ay%lense,fx%lense]*ay_w*fx_w +\
forcex[ay%lense,ax%lense]*ay_w*ax_w
force_intery[i] = forcey[cy%lense,cx%lense]*cy_w*cx_w + forcey[cy%lense,fx%lense]*cy_w*fx_w +\
forcey[fy%lense,cx%lense]*fy_w*cx_w + forcey[fy%lense,fx%lense]*fy_w*fx_w + forcey[cy%lense,ax%lense]*cy_w*ax_w +\
forcey[fy%lense,ax%lense]*fy_w*ax_w + forcey[ay%lense,cx%lense]*ay_w*cx_w + forcey[ay%lense,fx%lense]*ay_w*fx_w +\
forcey[ay%lense,ax%lense]*ay_w*ax_w
if p == 3:
acx = cx + 1
acy = cy + 1
afx = fx - 1
afy = fy - 1
cx_w = 1./6*(4.-6*(cx-xp[i])**2+3.*(cx-xp[i])**3)
cy_w = 1./6*(4.-6*(cy-yp[i])**2+3.*(cy-yp[i])**3)
fx_w = 1./6*(4.-6*(fx-xp[i])**2+3.*(xp[i]-fx)**3)
fy_w = 1./6*(4.-6*(fy-yp[i])**2+3.*(yp[i]-fy)**3)
acx_w = 1./6*(2-(acx-xp[i]))**3
acy_w = 1./6*(2-(acy-yp[i]))**3
afx_w = 1./6*(2-(xp[i]-afx))**3
afy_w = 1./6*(2-(yp[i]-afy))**3
force_interx[i] = forcex[cy%lense,cx%lense]*cy_w*cx_w + forcex[cy%lense,fx%lense]*cy_w*fx_w +\
forcex[cy%lense,acx%lense]*cy_w*acx_w + forcex[cy%lense,afx%lense]*cy_w*afx_w + forcex[fy%lense,cx%lense]*fy_w*cx_w + forcex[fy%lense,fx%lense]*fy_w*fx_w +\
forcex[fy%lense,acx%lense]*fy_w*acx_w + forcex[fy%lense,afx%lense]*fy_w*afx_w + forcex[acy%lense,cx%lense]*acy_w*cx_w + forcex[acy%lense,fx%lense]*acy_w*fx_w +\
forcex[acy%lense,acx%lense]*acy_w*acx_w + forcex[acy%lense,afx%lense]*acy_w*afx_w + forcex[afy%lense,cx%lense]*afy_w*cx_w + forcex[afy%lense,fx%lense]*afy_w*fx_w +\
forcex[afy%lense,acx%lense]*afy_w*acx_w + forcex[afy%lense,afx%lense]*afy_w*afx_w
force_intery[i] = forcey[cy%lense,cx%lense]*cy_w*cx_w + forcey[cy%lense,fx%lense]*cy_w*fx_w +\
forcey[cy%lense,acx%lense]*cy_w*acx_w + forcey[cy%lense,afx%lense]*cy_w*afx_w + forcey[fy%lense,cx%lense]*fy_w*cx_w + forcey[fy%lense,fx%lense]*fy_w*fx_w +\
forcey[fy%lense,acx%lense]*fy_w*acx_w + forcey[fy%lense,afx%lense]*fy_w*afx_w + forcey[acy%lense,cx%lense]*acy_w*cx_w + forcey[acy%lense,fx%lense]*acy_w*fx_w +\
forcey[acy%lense,acx%lense]*acy_w*acx_w + forcey[acy%lense,afx%lense]*acy_w*afx_w + forcey[afy%lense,cx%lense]*afy_w*cx_w + forcey[afy%lense,fx%lense]*afy_w*fx_w +\
forcey[afy%lense,acx%lense]*afy_w*acx_w + forcey[afy%lense,afx%lense]*afy_w*afx_w
return np.stack((force_interx.reshape(x.shape),force_intery.reshape(y.shape)))
def PP_field(self,x,y,N=400):
"""
PP force field for required positions
Parameters:
-----------
x: ndarray of any shape
x coordinates of required positions.
y: ndarray of any shape
y coordinates of required positions.
N: int, default=400
            Number of neighbour particles used for the adaptive softening length.
Returns:
-----------
f: ndarray of shape (2, x.shape[0], x.shape[1])
x and y direction PP force field for required positions
in (km/s)^2.
"""
@jit(nopython=True)
def get_index(count):
index = np.zeros(count.size + 1,dtype=np.int64)
index[0] = 0
for i in range(len(count)):
index[i+1] = index[i] + count[i]
return index
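        # e.g. get_index(np.array([2, 0, 3])) -> array([0, 2, 2, 5]): cumulative
        # offsets used to slice the flattened neighbour index lists below.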
@jit(nopython=True)
def PM_f1(x,a):
ep = 2.*x/a
return 1./a*(7.43080530e-01*ep**4-1.83299236e+00*ep**3-5.71160351e-02*ep**2+2.67270709e+00*ep-8.24463263e-05)
@jit(nopython=True)
def PM_f2(x,a):
ep = 2.*x/a
return 1./a*(1.53996716/ep-6.8231916+15.10702097*ep-11.85624512*ep**2+4.08123043*ep**3-0.52410421*ep**4)
@jit(nopython=True)
def f_pm(x,a):
f = np.zeros(x.shape)
f = np.where(x<a/2.,PM_f1(x,a),PM_f2(x,a))
f = np.where(x>a,1./x,f)
return f
@jit(nopython=True, parallel=True)
def PP(coor_inter1,coor_inter2,coor_part,ind1,ind2,index,m_p,am,ap1,ap2,box):
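            # 8.60183454013995 = 2*G in (km/s)^2 kpc / (10^6 Msun/h) units,
            # i.e. twice the 4.300917270069975 used in lense_parameter below.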
l1 = len(coor_inter1)
l2 = len(coor_inter2)
PP_fx = np.zeros(l1+l2)
PP_fy = np.zeros(l1+l2)
for i in prange(l1+l2):
if i < l2:
coor_p = coor_part[ind2[index[i]:index[i+1]]]
m = m_p[ind2[index[i]:index[i+1]]]
displace = coor_p - coor_inter2[i]
distance = np.sqrt(np.sum(displace**2,axis=1))
displace = np.transpose(displace)
part = displace / distance
f = 8.60183454013995*m*(f_pm(distance,ap2[i]) - f_pm(distance,am))*part
fi = np.sum(f,axis=1)
PP_fx[i] = fi[0]
PP_fy[i] = fi[1]
else:
coor_p = coor_part[ind1[i-l2]]
m = m_p[ind1[i-l2]]
displace = coor_p - coor_inter1[i-l2]
displace = np.where(displace>box/2.,displace-box,displace)
displace = np.where(displace<-1*box/2,displace+box,displace)
distance = np.sqrt(np.sum(displace**2,axis=1))
displace = np.transpose(displace)
part = displace / distance
f = 8.60183454013995*m*(f_pm(distance,ap1[i-l2]) - f_pm(distance,am))*part
fi = np.sum(f,axis=1)
PP_fx[i] = fi[0]
PP_fy[i] = fi[1]
return PP_fx,PP_fy
@jit(nopython=True, parallel=True)
def PP_point(coor_inter,coor_part,ind,index,m_p,a,count):
PP_fx = np.zeros(len(index)-1)
PP_fy = np.zeros(len(index)-1)
for i in prange(len(index)-1):
if index[i]==index[i+1]:
continue
else:
coor_p = coor_part[ind[index[i]:index[i+1]]]
m = m_p[ind[index[i]:index[i+1]]]
displace = coor_p - coor_inter[i]
distance = np.sqrt(np.sum(displace**2,axis=1))
displace = np.transpose(displace)
part = displace / distance
f = 8.60183454013995*m*(1/distance - f_pm(distance,a))*part
fi = np.sum(f,axis=1)
PP_fx[i] = fi[0]
PP_fy[i] = fi[1]
return PP_fx,PP_fy
xp = x.reshape(x.size)
yp = y.reshape(y.size)
xp = xp%self._box
yp = yp%self._box
coor_inter = np.array([xp,yp]).T
if N != 0:
dis_neigh,neigh = self._tree.query(coor_inter, k=N, workers=self._core)
dis_neigh = dis_neigh[:,-1]
j = dis_neigh<(self._a*self._H)
nj = ~j
coor_inter1 = coor_inter[nj]
coor_inter2 = coor_inter[j]
dis_neigh1 = dis_neigh[nj]
dis_neigh2 = dis_neigh[j]
ind1 = neigh[nj]
if len(coor_inter2) != 0:
ind2 = self._tree.query_ball_point(coor_inter2,r=self._a*self._H,workers=self._core)
arr_len = np.frompyfunc(len,1,1)
count2 = arr_len(ind2).astype(int)
ind2 = np.hstack(ind2)
else:
count2 = np.zeros(0,dtype=int)
ind2 = np.zeros(0,dtype=int)
index = get_index(count2)
ind1 = ind1.astype(int)
ind2 = ind2.astype(int)
PP_fx_t, PP_fy_t = PP(coor_inter1,coor_inter2,self._coor,ind1,ind2,index,self._m_p,self._a*self._H,dis_neigh1,dis_neigh2,float(self._box))
PP_fx = np.zeros(PP_fx_t.shape)
PP_fx[j] = PP_fx_t[0:len(dis_neigh2)]
PP_fx[nj] = PP_fx_t[len(dis_neigh2):]
PP_fy = np.zeros(PP_fy_t.shape)
PP_fy[j] = PP_fy_t[0:len(dis_neigh2)]
PP_fy[nj] = PP_fy_t[len(dis_neigh2):]
else:
ind = self._tree.query_ball_point(coor_inter,r=self._a*self._H,workers=self._core)
arr_len = np.frompyfunc(len,1,1)
count = arr_len(ind).astype(int)
ind = np.hstack(ind)
ind = ind.astype(int)
index = get_index(count)
PP_fx, PP_fy = PP_point(coor_inter,self._coor,ind,index,self._m_p,self._a*self._H,count)
return np.stack((PP_fx.reshape(x.shape),PP_fy.reshape(y.shape)))
def total_field(self,x,y,PP=True,N=400):
"""
Total force field for required positions.
Parameters:
-----------
x: ndarray of any shape
x coordinates of required positions.
y: ndarray of any shape
y coordinates of required positions.
        PP: bool, default=True
            If False, only the PM force is computed.
        N: int, default=400
            Number of neighbour particles used for the adaptive softening length in PP.
Returns:
-----------
f: ndarray of shape (2, x.shape[0], x.shape[1])
x and y direction total force field for required positions
in (km/s)^2.
"""
if PP==True:
return self.PM_field(x, y) + self.PP_field(x,y,N)
else:
return self.PM_field(x, y)
def deflection_angle(self,x,y,PP=True,N=400):
"""
Deflection angles for required positions.
Parameters:
-----------
x: ndarray of any shape
x coordinates of required positions.
y: ndarray of any shape
y coordinates of required positions.
        PP: bool, default=True
            If False, only the PM force is computed.
        N: int, default=400
            Number of neighbour particles used for the adaptive softening length in PP.
Returns:
-----------
f: ndarray of shape (2, x.shape[0], x.shape[1])
x and y direction deflection angles for required positions
in radian.
"""
return self.total_field(x,y,PP,N)*(-2)/(3e5)**2 # rad
@staticmethod
@jit(nopython=True,parallel=True)
def _lens(angle_mx,angle_px,angle_my,angle_py,d,H,zl,zs,offset,Ds,Dl,Dls):
# for Function lense_parameter
angle_dx = (angle_px-angle_mx)/(2.*d*H)
angle_dy = (angle_py-angle_my)/(2.*d*H)
convergence = 0.5*(angle_dx[0]+angle_dy[1])
convergence += offset
shear1 = 0.5*(angle_dx[0]-angle_dy[1])
shear2 = 0.5*(angle_dx[1]+angle_dy[0])
scale = Dls*Dl/Ds
convergence *= scale
shear1 *= scale
shear2 *= scale
magnification = 1./((1.-convergence)**2-shear1**2-shear2**2)
return np.stack((convergence,shear1,shear2,magnification))
def lense_parameter(self,x,y,d=0.05,PP=True,N=400,zl=0.5,zs=1.0,cosmo=cosmo):
"""
Lensing parameters for required positions. Should be used only
for single plane problems.
Parameters:
-----------
x: ndarray of any shape
x coordinates of required positions.
y: ndarray of any shape
y coordinates of required positions.
d: float, default=0.05
            Finite-difference step d*H used to calculate the lensing parameters.
            Deflection angles at x+d*H, x-d*H, y+d*H and y-d*H are calculated
            to derive the lensing parameters at (x, y).
        PP: bool, default=True
            If False, only the PM force is computed.
        N: int, default=400
            Number of neighbour particles used for the adaptive softening length in PP.
zl: float, default=0.5
Redshift of the lens plane.
zs: float, default=1.0
Redshift of the source plane.
cosmo: astropy.cosmology, default=Planck15
Cosmology used to calculate angular diameter distances.
Returns:
-----------
parameters: ndarray of shape (4, x.shape[0], x.shape[1])
[convergence,shear1,shear2,magnification] for required
positions.
"""
Ds = cosmo.angular_diameter_distance(zs).value*1000.*cosmo.h
Dl = cosmo.angular_diameter_distance(zl).value*1000.*cosmo.h
Dls = cosmo.angular_diameter_distance_z1z2(zl, zs).value*1000.*cosmo.h
angle_mx = self.deflection_angle((x-d*self._H),y,PP,N)
angle_px = self.deflection_angle((x+d*self._H),y,PP,N)
angle_my = self.deflection_angle(x,(y-d*self._H),PP,N)
angle_py = self.deflection_angle(x,(y+d*self._H),PP,N)
offset = np.sum(self._m_p)/self._box**2*4.*np.pi*4.300917270069975/(3e5)**2
return self._lens(angle_mx,angle_px,angle_my,angle_py,d,self._H,zl,zs,offset,Ds,Dl,Dls)
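# Illustrative usage sketch (hypothetical values; assumes the Green(...) helper
# referenced in _PM_grid is importable alongside this class):
#   coor = np.random.uniform(0., 512., size=(100000, 2))        # kpc/h
#   plane = Plane(coor, box=512, m_p=1., H=1, p=2, a=6, fftw=False)
#   xx, yy = np.meshgrid(np.linspace(0., 512., 64), np.linspace(0., 512., 64))
#   alpha = plane.deflection_angle(xx, yy)                       # (2, 64, 64), rad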
#Green function
def green(kx,ky,H=1,p=2,a=6.,alpha=4./3.,n=1):
def sr(k,a):
result = np.where(k==0,1.,128./(k**3*a**3)*j1(k*a/2.)-32./(k**2*a**2)*j0(k*a/2.))
return result
def R(kx,ky,a):
k = np.sqrt(kx**2+ky**2)
if a != 0:
s = sr(k,a)
else:
s = 1.
        return np.stack((-1j*kx*s**2/k**2,-1j*ky*s**2/k**2))
"""
common functions for pattern extraction
"""
import argparse
import copy
import sys
from functools import reduce, partial
from operator import itemgetter
from typing import List, Tuple, Dict, Optional, Callable, Any, Iterable, Union, Set, Hashable
import typing
from sklearn.feature_selection import RFECV
from sklearn.model_selection import cross_val_score
from statsmodels import robust
from ticc.TICC_solver import TICC
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import csv
import json
import os
import pickle
import logging
import string
from collections import Counter, OrderedDict
from itertools import groupby
from util import *
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor
def combine_user_series(series_lookup: Dict[Hashable, np.ndarray], noise: np.ndarray,
default_noise_duration=1000) -> Tuple[Dict[Hashable, Tuple[int, int]], np.ndarray]:
"""
concatenate all the series in series lookup together
:param series_lookup: dictionary mapping uid to time series
:param noise: noise values to tile between users
:param default_noise_duration:
    :return: dictionary mapping uid to a tuple of the start and end indices in all_series for that user, and the
        resulting concatenated series
"""
uids = sorted(series_lookup.keys())
all_series = series_lookup[uids[0]]
idx_lookup = {}
idx_lookup[uids[0]] = (0, len(all_series))
for uid in uids[1:]:
ser = series_lookup[uid]
idx_lookup[uid] = (len(all_series) + default_noise_duration,
len(all_series) + default_noise_duration + len(ser))
        all_series = np.concatenate((all_series,
                                     np.tile(noise, (default_noise_duration, 1)),
                                     ser))
    return idx_lookup, all_series
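# Example (hypothetical shapes): two user series separated by tiled noise.
#   lookup = {"u1": np.zeros((100, 4)), "u2": np.ones((50, 4))}
#   idx, combined = combine_user_series(lookup, noise=np.full(4, -1.0))
#   combined.shape == (1150, 4); idx == {"u1": (0, 100), "u2": (1100, 1150)}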
# Copyright (c) <NAME>, <NAME>, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Offline Bandit Algorithms."""
from collections import OrderedDict
from dataclasses import dataclass
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from scipy.special import softmax
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import clone
from sklearn.base import is_classifier
from sklearn.linear_model import LogisticRegression
from sklearn.utils import check_random_state
from sklearn.utils import check_scalar
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
import torch.optim as optim
from tqdm import tqdm
from obp.ope import RegressionModel
from ..utils import check_array
from ..utils import check_bandit_feedback_inputs
from ..utils import check_tensor
from ..utils import softmax as softmax_axis1
from .base import BaseOfflinePolicyLearner
@dataclass
class IPWLearner(BaseOfflinePolicyLearner):
"""Off-policy learner based on Inverse Probability Weighting and Supervised Classification.
Parameters
-----------
n_actions: int
Number of actions.
len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
When Open Bandit Dataset is used, 3 should be set.
base_classifier: ClassifierMixin
Machine learning classifier used to train an offline decision making policy.
References
------------
<NAME>, <NAME>, <NAME>, and <NAME>.
"Doubly Robust Policy Evaluation and Optimization.", 2014.
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Large-scale Validation of Counterfactual Learning Methods: A Test-Bed.", 2016.
"""
base_classifier: Optional[ClassifierMixin] = None
def __post_init__(self) -> None:
"""Initialize class."""
super().__post_init__()
if self.base_classifier is None:
self.base_classifier = LogisticRegression(random_state=12345)
else:
if not is_classifier(self.base_classifier):
raise ValueError("`base_classifier` must be a classifier")
self.base_classifier_list = [
clone(self.base_classifier) for _ in np.arange(self.len_list)
]
@staticmethod
def _create_train_data_for_opl(
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Create training data for off-policy learning.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
Returns
--------
(X, sample_weight, y): Tuple[np.ndarray, np.ndarray, np.ndarray]
Feature vectors, sample weights, and outcome for training the base machine learning model.
"""
return context, (reward / pscore), action
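    # Sketch of the reduction this helper enables (hypothetical data): actions
    # become class labels and reward/pscore become sample weights, so any
    # weighted classifier maximizes the IPW estimate of the policy value.
    #
    #   X, w, y = IPWLearner._create_train_data_for_opl(
    #       context=np.random.randn(8, 3), action=np.arange(8) % 2,
    #       reward=np.random.binomial(1, 0.5, 8), pscore=np.full(8, 0.5))
    #   LogisticRegression().fit(X, y, sample_weight=w)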
def fit(
self,
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
) -> None:
"""Fits an offline bandit policy on the given logged bandit data.
Note
--------
This `fit` method trains a deterministic policy :math:`\\pi: \\mathcal{X} \\rightarrow \\mathcal{A}`
via a cost-sensitive classification reduction as follows:
.. math::
\\hat{\\pi}
& \\in \\arg \\max_{\\pi \\in \\Pi} \\hat{V}_{\\mathrm{IPW}} (\\pi ; \\mathcal{D}) \\\\
& = \\arg \\max_{\\pi \\in \\Pi} \\mathbb{E}_{n} \\left[\\frac{\\mathbb{I} \\{\\pi (x_{i})=a_{i} \\}}{\\pi_{b}(a_{i} | x_{i})} r_{i} \\right] \\\\
& = \\arg \\min_{\\pi \\in \\Pi} \\mathbb{E}_{n} \\left[\\frac{r_i}{\\pi_{b}(a_{i} | x_{i})} \\mathbb{I} \\{\\pi (x_{i}) \\neq a_{i} \\} \\right],
        where :math:`\\mathbb{E}_{n} [\\cdot]` is the empirical average over observations in :math:`\\mathcal{D}`.
See the reference for the details.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors observed for each data, i.e., :math:`x_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_b(a_i|x_i)`.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, a learner assumes that only a single action is chosen for each data.
"""
check_bandit_feedback_inputs(
context=context,
action=action,
reward=reward,
pscore=pscore,
position=position,
)
if (reward < 0).any():
raise ValueError(
"A negative value is found in `reward`."
"`obp.policy.IPWLearner` cannot handle negative rewards,"
"and please use `obp.policy.NNPolicyLearner` instead."
)
if pscore is None:
n_actions = np.int32(action.max() + 1)
pscore = np.ones_like(action) / n_actions
if self.len_list == 1:
position = np.zeros_like(action, dtype=int)
else:
if position is None:
raise ValueError("When `self.len_list > 1`, `position` must be given.")
        for p in np.arange(self.len_list):
            # train one classifier per position via weighted classification
            X, sample_weight, y = self._create_train_data_for_opl(
                context=context[position == p],
                action=action[position == p],
                reward=reward[position == p],
                pscore=pscore[position == p],
            )
            self.base_classifier_list[p].fit(X=X, y=y, sample_weight=sample_weight)
#! /usr/bin/python3
print('this is cell-analyzer v0.1.0' + '\n')
print('preparing image segmentation run...' + '\n')
import os
import glob
import numpy as np
import pandas as pd
import skimage as sk
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as shc
from datetime import datetime as dt
from matplotlib.colors import ListedColormap, LogNorm
from matplotlib import cm
from skimage import exposure, feature, filters, measure, morphology, segmentation
from scipy import ndimage as ndi
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
import umap
import warnings
warnings.filterwarnings("ignore")
def process_image(img, norm_window, min_hole_size, min_cell_size, extrema_blur, peak_sep, name='temp.TIF', save_path = '.'):
img_dims = np.shape(img)
print('image dimensions: ', img_dims)
if len(img_dims) < 3:
n_chan = 1
content = img
v_min, v_max = np.percentile(content, (1,99))
content_scaled = exposure.rescale_intensity(content, in_range=(v_min, v_max))
else:
# handle if first channel is blank
if np.mean(img[:,:,0]) < 1:
img = img[:,:,1:]
img_dims = np.shape(img)
# handle other blank channels
n_chan = img_dims[2]
base = img[:,:,0]
# restack image, excluding blank channels
for channel in range(1, n_chan):
if np.sum(img[:,:,channel]) > (img_dims[0] * img_dims[1] * 0.2):
                base = np.dstack((base, img[:,:,channel]))  # works whether base is 2-D or already stacked
img = base
img_dims = np.shape(img)
n_chan = img_dims[2]
### custom colormaps
N = 256
blank = np.zeros(N)
gray = np.linspace(0, 1, N)
# blue
blues = np.ones((N,4))
blues[:,0] = blank
blues[:,1] = blank
blues[:,2] = gray
blue_cmap = ListedColormap(blues)
# green
greens = np.ones((N,4))
greens[:,0] = blank
greens[:,1] = gray
greens[:,2] = blank
green_cmap = ListedColormap(greens)
# red
reds = np.ones((N,4))
reds[:,0] = gray
reds[:,1] = blank
reds[:,2] = blank
red_cmap = ListedColormap(reds)
# separate and scale channels for vis
content = np.sum(img, axis=2)
v_min, v_max = np.percentile(content, (1,99))
content_scaled = exposure.rescale_intensity(content, in_range=(v_min, v_max))
if n_chan >= 1:
dapi = img[:,:,0]
v_min, v_max = np.percentile(dapi, (1,99))
dapi_scaled = exposure.rescale_intensity(dapi, in_range=(v_min, v_max))
if n_chan >= 2:
gfp = img[:,:,1]
v_min, v_max = np.percentile(gfp, (1,99))
gfp_scaled = exposure.rescale_intensity(gfp, in_range=(v_min, v_max))
if n_chan >= 3:
txred = img[:,:,2]
v_min, v_max = np.percentile(txred, (1,99))
txred_scaled = exposure.rescale_intensity(txred, in_range=(v_min, v_max))
if n_chan == 4:
cy5 = img[:,:,3]
        v_min, v_max = np.percentile(cy5, (1,99))
        cy5_scaled = exposure.rescale_intensity(cy5, in_range=(v_min, v_max))
"""Collection of routines used to post process (magnetisation)
vector data.
Part of ovf2vtk.
<NAME>, <EMAIL>
# Some helping functions:
#
# Set of functions used for ovf2vtk. General data format is a rank-2
# matrix with the row corresponding to the data in the vector field, and
# the 3-component column corresponding to the magnetisation vector at
# that place.
#
# Together with an index function (last component varying fastest,
# C-style) and positions vectors x_vec, y_vec and z_vec (for three
# dimensional domains), these data can be mapped to positions in real
# space.
#
# All this makes only sense for rectilinear grids.
#
# (fangohr 25/08/2003 00:18)
"""
try:
import numpy as Numeric
except ImportError:
print("This program needs Numpy. Please download and install. \
(http://sourceforge.net/projects/numpy).")
print("If you are using Numeric, you can use the older version \
0.1.17 of ovf2vtk.")
raise ImportError("Couldn't import Numpy -- cannot proceed.")
__version__ = "$Revision: 1.3 $"
def magnitude(vec_array):
"""expects an array of 3D vectors; array of shape (Nx3),
returns the magnitude (standard 2-Norm)"""
# square entries: d*d
# sum over 3-components (axis 1): add.reduce( d*d, 1)
# take square root sqrt( add.reduce ( d*d, 1 ) )
return Numeric.sqrt(Numeric.add.reduce(vec_array ** 2, 1))
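# Example (illustrative):
#   >>> magnitude(Numeric.array([[3., 4., 0.], [1., 2., 2.]]))
#   array([5., 3.])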
def convert_flat_fortran_to_3dmatrix(vf, Nx, Ny, Nz):
"""Takes a 1xN array, vf, and converts it to an array of shape
(Nx, Ny, Nz, 3) -> In Fortan order"""
return Numeric.resize(vf, (Nz, Ny, Nx, 3))
def convert_fortran_3dmatrix_to_flat(M):
"""Takes array of any shape, returns array of shape (1xN)"""
return Numeric.array(M).ravel()
def convert_fortran_3dmatrix_to_flat_vector(M):
"""Takes array of any shape, returns array of shape (X, 3)"""
N = len(Numeric.array(M).ravel())
    return Numeric.resize(M, (N // 3, 3))
def convert_between_fortran_and_C(a):
"""assuming Fortran data is stored as M[Nz,Ny,Nx,:,...:] and
C data as M[Nx,Ny,Nz,:,,...,:] then this function converts from
one to the other. (fangohr 31/03/2004 23:06)"""
return Numeric.swapaxes(a, 0, 2)
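# e.g. an array of shape (Nz, Ny, Nx, 3) becomes (Nx, Ny, Nz, 3); the trailing
# vector-component axis is untouched because only axes 0 and 2 are swapped.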
def components(vec_array):
"""returns the x, y and z components of an array of vectors of shape Nx3"""
return (vec_array[:, 0], vec_array[:, 1], vec_array[:, 2])
def plane_angles(vec_array):
"""Input is matrix, containing N 3d vectors.
Returns angles in yx, yz and xz plane for all vectors."""
x, y, z = components(vec_array)
# if any Ms is smaller that 1e-6, then set to zero to eliminate noise
cutoff = 1e-6
Ms = magnitude(vec_array)
x2 = Numeric.choose(Numeric.less(Ms, cutoff), (x, 0.0))
y2 = Numeric.choose(Numeric.less(Ms, cutoff), (y, 0.0))
    z2 = Numeric.choose(Numeric.less(Ms, cutoff), (z, 0.0))
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 11:35:23 2019
@author: DiPu
"""
import numpy as np
# taking input 9 space separated digits
numbers=[int(n) for n in input("enter 9 numbers:").split()]
#converting into numpy array
x = np.array(numbers)
def line_map(out_filename, filename, extensions, center_wavelength, velocity=0, revise_bounds=False, snr_limit=0,
mcmc=False, **kwargs):
"""
Wrapper function that reads a FITS file and fits an emission
line with a Gaussian with the optional addition of up to a
2nd degree polynomial. It then compiles the fits into a
new FITS file containing the resulting line intensity,
line intensity uncertainty, continuum, velocity, and
FWHM maps.
Parameters
----------
out_filename : string
A string containing the name of the resulting FITS file.
filename : string
A string containing the FITS file to be read.
extensions : list of strings or integers
A list of 3 or 4 string and/or integers containing the name
or index of the extensions to be read. The order of the
list must be 0) the flux data cube extension, 1) the flux
error data cube extension, 2) the array of wavelengths
extension, and optionally 3) the exposure map data cube
extension. If the wavelength array is in a FITS table, a tuple
can be given for the wavelength extension, which gives the
table extension and table column name, respectively.
center_wavelength : scalar
A scalar containing the center wavelength of the line
in microns that is to be fit as if observed in the
rest frame
velocity : scalar, optional
A scalar containing the velocity of the object in km/s.
If not specified a value of 0 is assumed.
revise_bounds : boolean, optional
A boolean that if set to True will refit the data using
an initial fit's parameter ranges as new bounds for the
parameter ranges.
snr_limit : scalar, optional
A scalar which is only used if 'revise_bounds' is True.
It indicates a signal-to-noise level of the
initial fit's line intensity below which
data will not be considered when revising the bounds.
mcmc : bool, optional
A boolean specifying if an MCMC algorithm should be used to
fit the model to the data. The MCMC algorithm uses the default
emcee package (https://emcee.readthedocs.io/en/stable/user/install/).
The initial state of the MCMC chain is the result from the non-linear
least squares fit and the log-probability come from chisqr.
kwargs
Keyword arguments passed to the function line_fitting().
"""
from spec_map_analysis.spectra_fitting import line_fitting
from astropy.io import fits
import numpy as np
from spec_map_analysis.spectra_fitting import file_reader
from copy import deepcopy
# Read in the data and generate headers for output FITS files
# Copy the kwargs dictionary and add in misc keywords for addition to the primary header HISTORY and ease
# of use in file_reader function
kwargs_reader = deepcopy(kwargs)
kwargs_reader['revise_bounds'] = revise_bounds
kwargs_reader['snr_limit'] = snr_limit
kwargs_reader['mcmc'] = mcmc
fitting_data, primary_hdr, image_hdr = file_reader(filename, extensions, center_wavelength,
velocity=velocity, **kwargs_reader)
# Fit the data, and if bounds are to be revised, do not fit with MCMC
if revise_bounds:
line_intensity, parameter = line_fitting(fitting_data, **kwargs)
else:
line_intensity, parameter = line_fitting(fitting_data, mcmc=mcmc, **kwargs)
# If the keyword revise_bounds is set, refit the data using the current fit to further
# restrict the fitting bounds
# Check if number of terms in the fit is specified. If not set to default of 3
if 'nterms' in kwargs:
nterms = kwargs['nterms']
else:
nterms = 3
if revise_bounds:
# Refit the data using the initial fits as better constraints on the Gaussian peak location and
# sigma ranges as to generate better fits.
# Create bounds for each fit parameter based on previous high SNR fits. Exclude those with
        # extremely high signal-to-noise, as it is likely an artifact of the fitting
snr = line_intensity['INTENSITY'] / line_intensity['UNCERTAINTY']
snr[snr > 1e3] = 0
snr_mask = snr > snr_limit
vel = line_intensity['VELOCITY']
width = line_intensity['FWHM']
# Check if lower bounds were set. If set, use them for peak height, and continuum limits.
# Note: the revised bounds can only reduce the bound range from the input, and cannot expand it
if 'lower_bounds' in kwargs:
lower_bound = kwargs['lower_bounds']
lower = np.array([lower_bound[0], np.nanmin(vel[snr_mask]), np.nanmin(width[snr_mask])])
if nterms >= 4:
lower = np.append(lower, lower_bound[3])
if nterms >= 5:
lower = np.append(lower, lower_bound[4])
if nterms == 6:
                lower = np.append(lower, lower_bound[5])
# coding: utf-8
# # Multiclass Support Vector Machine exercise
#
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# In this exercise you will:
#
# - implement a fully-vectorized **loss function** for the SVM
# - implement the fully-vectorized expression for its **analytic gradient**
# - **check your implementation** using numerical gradient
# - use a validation set to **tune the learning rate and regularization** strength
# - **optimize** the loss function with **SGD**
# - **visualize** the final learned weights
#
# In[ ]:
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
# ## CIFAR-10 Data Loading and Preprocessing
# In[ ]:
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# In[ ]:
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# In[ ]:
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# In[ ]:
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# In[ ]:
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# In[ ]:
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# In[ ]:
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
import warnings
import numpy as np
from fireworks import explicit_serialize, Workflow, FireTaskBase, FWAction
from mpmorph.analysis import md_data
from mpmorph.runners.rescale_volume import RescaleVolume, fit_BirchMurnaghanPV_EOS
from mpmorph.util import recursive_update
from pymatgen.core import Structure
from pymatgen.io.vasp import Poscar
from pymatgen.io.vasp.outputs import Vasprun
from scipy import stats
__author__ = '<NAME> and <NAME>'
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
@explicit_serialize
class DiffusionTask(FireTaskBase):
required_params = ['temperatures', 'max_steps', 'target_steps',
                       'num_samples', 'trajectory_to_db', 'notes']
optional_params = []
def run_task(self, fw_spec):
from mpmorph.workflows.converge import get_converge_wf
        vr = Vasprun('vasprun.xml.gz')
        s = vr.final_structure  # relaxed structure handed to the spawned workflows
fws = []
for t in self['temperatures']:
fws.extend(get_converge_wf(s, int(t), max_steps=self['max_steps'],
target_steps=self['target_steps'],
trajectory_to_db=self['trajectory_to_db'],
notes=self['notes']))
wf = Workflow(fws)
return FWAction(detours=wf)
@explicit_serialize
class ConvergeTask(FireTaskBase):
"""
Ensures a structure is converged before production MD run
"""
required_params = ["converge_params", "run_specs", "md_params"]
optional_params = ["rescale_params", 'tag_id', "optional_fw_params"]
def run_task(self, fw_spec):
from mpmorph.fireworks import powerups
from mpmorph.fireworks.core import MDFW
# Load Structure from Poscar
_poscar = Poscar.from_file("CONTCAR.gz")
structure = _poscar.structure
# Get convergence parameters from spec
converge_params = self["converge_params"]
avg_fraction = converge_params.get("avg_fraction", 0.5)
convergence_vars = dict(converge_params["converge_type"])
if "ionic" not in convergence_vars.keys():
convergence_vars["ionic"] = 0.0005
rescale_params = self.get("rescale_params", {})
# Load Data from OUTCAR
search_keys = ['external', 'kinetic energy EKIN', '% ion-electron', 'ETOTAL']
key_map = {'density': 'external', 'kinetic energy': 'kinetic energy EKIN',
'ionic': '% ion-electron', 'total energy': 'ETOTAL'}
outcar_data = md_data.get_MD_data("./OUTCAR.gz", search_keys=search_keys)
# Check for convergence
converged = {}
_index = search_keys.index(key_map["density"])
_data = np.transpose(outcar_data)[_index].copy()
pressure = np.mean(_data[int(avg_fraction * (len(_data) - 1)):])
if "density" in convergence_vars.keys():
if np.abs(pressure) >= convergence_vars["density"]:
converged["density"] = False
else:
converged["density"] = True
if "kinetic energy" in convergence_vars.keys():
_index = search_keys.index(key_map["kinetic energy"])
energy = np.transpose(outcar_data)[_index].copy()
norm_energy = (energy / structure.num_sites) / np.mean(energy / structure.num_sites) - 1
if np.abs(np.mean(norm_energy[-500:]) - np.mean(norm_energy)) > convergence_vars["kinetic energy"]:
converged["kinetic energy"] = False
else:
converged["kinetic energy"] = True
_index = search_keys.index(key_map["ionic"])
energy = np.transpose(outcar_data)[_index].copy()
norm_energies = energy / structure.num_sites
mu, std = stats.norm.fit(norm_energies)
mu1, std1 = stats.norm.fit(norm_energies[0:int(len(norm_energies) / 2)])
mu2, std2 = stats.norm.fit(norm_energies[int(len(norm_energies) / 2):])
if np.abs((mu2 - mu1) / mu) < convergence_vars["ionic"]:
converged["ionic"] = True
else:
converged["ionic"] = False
# Spawn Additional Fireworks
if not all([item[1] for item in converged.items()]):
density_spawn_count = converge_params["density_spawn_count"]
energy_spawn_count = converge_params["energy_spawn_count"]
max_rescales = converge_params["max_rescales"]
max_energy_runs = 3 # Set max energy convergence runs to default of 3
run_specs = self["run_specs"]
md_params = self["md_params"]
optional_params = self.get("optional_fw_params", {})
tag_id = self.get("tag_id", "")
if density_spawn_count >= max_rescales:
return FWAction(defuse_children=True)
elif energy_spawn_count >= max_energy_runs:
# Too many energy rescales... Just continue with the production runs
return FWAction(stored_data={'pressure': pressure,
'energy': mu,
'density_calculated': True})
elif not converged.get("density", True):
rescale_args = {"initial_pressure": pressure * 1000, "initial_temperature": 1, "beta": 0.0000005}
rescale_args = recursive_update(rescale_args, rescale_params)
# Spawn fw
fw = MDFW(structure, name=f'density_run_{density_spawn_count + 1}-{tag_id}',
previous_structure=False,
**run_specs, **md_params, **optional_params)
converge_params["density_spawn_count"] += 1
_spawner_args = {"converge_params": converge_params, "rescale_params": rescale_params,
"run_specs": run_specs, "md_params": md_params,
"optional_fw_params": optional_params, "tag_id": tag_id}
fw = powerups.add_rescale_volume(fw, **rescale_args)
fw = powerups.add_pass_pv(fw)
fw = powerups.add_converge_task(fw, **_spawner_args)
wf = Workflow([fw])
return FWAction(detours=wf, stored_data={'pressure': pressure, 'energy': mu})
else:
fw = MDFW(structure, name=f'energy_run_{energy_spawn_count + 1}-{tag_id}', previous_structure=False,
**run_specs, **md_params, **optional_params)
converge_params["energy_spawn_count"] += 1
_spawner_args = {"converge_params": converge_params, "rescale_params": rescale_params,
"run_specs": run_specs, "md_params": md_params,
"optional_fw_params": optional_params, "tag_id": tag_id}
fw = powerups.add_pass_pv(fw)
fw = powerups.add_converge_task(fw, **_spawner_args)
wf = Workflow([fw])
return FWAction(detours=wf, stored_data={'pressure': pressure, 'energy': mu})
else:
return FWAction(stored_data={'pressure': pressure,
'energy': mu,
'density_calculated': True})
@explicit_serialize
class RescaleVolumeTask(FireTaskBase):
"""
Volume rescaling
"""
required_params = ["initial_temperature", "initial_pressure"]
optional_params = ["target_pressure", "target_temperature", "target_pressure", "alpha", "beta"]
def run_task(self, fw_spec):
# Initialize volume correction object with last structure from last_run
initial_temperature = self["initial_temperature"]
initial_pressure = self["initial_pressure"]
target_temperature = self.get("target_temperature", initial_temperature)
target_pressure = self.get("target_pressure", 0.0)
alpha = self.get("alpha", 10e-6)
beta = self.get("beta", 10e-7)
corr_vol = RescaleVolume.of_poscar(poscar_path="./POSCAR", initial_temperature=initial_temperature,
initial_pressure=initial_pressure,
target_pressure=target_pressure,
target_temperature=target_temperature, alpha=alpha, beta=beta)
# Rescale volume based on temperature difference first. Const T will return no volume change:
corr_vol.by_thermo(scale='temperature')
# TO DB ("Rescaled volume due to delta T: ", corr_vol.structure.volume)
# Rescale volume based on pressure difference:
corr_vol.by_thermo(scale='pressure')
# TO DB ("Rescaled volume due to delta P: ", corr_vol.structure.volume)
corr_vol.poscar.write_file("./POSCAR")
# Pass the rescaled volume to Poscar
return FWAction(stored_data=corr_vol.structure.as_dict())
@explicit_serialize
class PVRescaleTask(FireTaskBase):
"""
Rescale based on fitting pressure vs volume to Birch-Murnaghan EOS
"""
required_params = []
optional_params = ['rescale_type']
def run_task(self, fw_spec):
rescale_type = self.get('rescale_type', 'BirchMurnaghan_EOS')
if rescale_type == 'BirchMurnaghan_EOS':
pv_pairs = np.array(fw_spec["pressure_volume"])
pv_pairs = np.flip(pv_pairs, axis=1)
pv_pairs = np.flip(pv_pairs[pv_pairs[:, 1].argsort()], axis=0)
try:
params = fit_BirchMurnaghanPV_EOS(pv_pairs)
equil_volume = params[0]
except:
warnings.warn("Could not converge Birch-Murnaghan EOS fit, trying linear regression")
rescale_type = 'linear_regression'
pvs = fw_spec["pressure_volume"]
p = [item[1] for item in pvs]
v = [item[0] for item in pvs]
if rescale_type == 'linear_regression':
slope, intercept, r_value, p_value, std_err = stats.linregress(v, p)
if slope >= 0:
## In future try building a hull with composition and volume. then getting composition volume
raise ValueError("P and V should be inversely related. Try using larger NSW in the volume variation")
equil_volume = -intercept / slope
frac_change = equil_volume / sorted(v)[int(np.floor(len(v) / 2))]
if frac_change > 2 or frac_change < 0.5:
# If volume is greater than 2x or 0.5x, use the lowest pressure volume.
            equil_volume = v[np.argmin(p)]
# pylint: disable=missing-function-docstring, missing-module-docstring/
import numpy as np
if __name__ == '__main__':
# ---------------------------- Array creation ---------------------------------
ac0 = np.array([0])
ac1 = np.array([0, 0, 0, 0])
ac2 = np.array([1, 2, 3, 4, 5])
ac3 = np.array([-1, -2, -3, -4, -5])
print(ac0[0])
for i in range(4):
print(ac1[i])
print()
for i in range(5):
print(ac2[i])
print()
for i in range(5):
print(ac3[i])
print()
# ------------------------------- Array full ----------------------------------
af1 = np.full(5, 2.0, dtype=float)
for i in range(5):
print(af1[i])
print()
af2 = np.full((5, 5), 31254626, dtype=int)
for i in range(5):
for j in range(5):
print(af2[i][j])
print()
af3 = np.full((20, 5), -1.58, dtype=np.double)
for i in range(20):
for j in range(5):
print(af3[i][j])
print()
af4 = np.full((3, 10), 1+2j, dtype=complex)
for i in range(3):
for j in range(10):
print(af4[i][j])
print()
af5 = np.full(5, complex(12.2, 13), dtype=complex)
for i in range(5):
print(af5[i])
print()
    # ------------------------- Array ones / empty / like -------------------------
ao3 = np.ones(10)
for i in range(10):
print(ao3[i])
print()
ao4 = np.ones((2,3))
for i in range(2):
for j in range(3):
print(ao4[i][j])
print()
ae1 = np.empty((2,3))
afl = np.full_like(ae1, 5.21, float)
for i in range(2):
for j in range(3):
print(afl[i][j])
print()
aol = np.ones_like(af4)
for i in range(3):
for j in range(10):
print(aol[i][j])
print()
azl = np.zeros_like(afl)
for i in range(2):
for j in range(3):
print(azl[i][j])
print()
# ------------------------------ Array init ----------------------------------
a = np.array([1,2,3])
    b = np.array(a)
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import warp
from skimage.transform import SimilarityTransform
from skimage import img_as_ubyte
def read_file(path):
"""Read an optical flow map from disk
Optical flow maps are stored in disk as 3-channel uint16 PNG images,
following the method described in the KITTI optical flow dataset 2012
(http://www.cvlibs.net/datasets/kitti/eval_stereo_flow.php?benchmark=flow).
Returns:
numpy array with shape [height, width, 3]. The first and second channels
denote the corresponding optical flow 2D vector (u, v). The third channel
is a mask denoting if an optical flow 2D vector exists for that pixel.
Vector components u and v values range [-512..512].
"""
data = cv2.imread(path, -1).astype('float32')
result = np.empty(data.shape, dtype='float32')
result[:,:,0] = (data[:,:,2] - 2**15) / 64
result[:,:,1] = (data[:,:,1] - 2**15) / 64
result[:,:,2] = data[:,:,0]
return result
def quantize_map(flow, size):
"""Quantize an optical flow map
The map is divided into non-overlapping square areas and the mean motion
vector is computed for each of them.
Args:
flow: numpy array with the optical flow to quantize
size: (int) size of the square areas to use in the process.
Returns:
Numpy array with shape [new_h, new_w, 3], where:
new_h = int(flow.shape[0] / size)
new_w = int(flow.shape[1] / size)
"""
h, w, n = flow.shape
h_dst = int(h / size)
w_dst = int(w / size)
dst = np.zeros([h_dst, w_dst, n], dtype='float32')
for i in range(h_dst):
for j in range(w_dst):
bin = flow[i*size:(i+1)*size, j*size:(j+1)*size]
valid = bin[:,:,2] == 1
if bin[valid].size > 0:
dst[i, j] = np.mean(bin[valid], axis=0)
return dst
def plot_map(im, flow, size=None, title=''):
"""Plot an optical flow map on top of their corresponding image
Args:
im: (numpy array) numpy array image in grayscale or color.
flow: (numpy array) optical flow map for `im`
size: (optional, int) the size to use in the quantization process. When
specified, the image is divided into non-overlapping square areas, and
the mean optical motion vector is computed for each.
title: (optional, str) plot title.
"""
if size:
flow = quantize_map(flow, size)
start = int(size/2)
else:
start = 0
size = 1
if im.ndim == 3 and im.shape[2] == 3:
im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
im = np.squeeze(im) # grayscale image shapes: [h, w, 1] -> [h, w]
h, w = flow.shape[:2]
x, y = np.meshgrid(np.arange(w) * size + start,
np.arange(h) * size + start)
valid = flow[:,:,2] == 1
fig = plt.figure()
plt.title(title)
plt.imshow(im, cmap='gray')
plt.quiver(x[valid], y[valid], flow[valid][:,0], -flow[valid][:,1],
angles='uv', minlength=0.5, width=0.002, headwidth=4,
color='#ff6600ff')
plt.show()
return fig
def patch_correlation(template, im):
"""Returns: (x, y) displacement vector
"""
# https://docs.opencv.org/3.4.1/df/dfb/group__imgproc__object.html
# TODO: deal with multiple maxima!! It should choose the most centered...
result = cv2.matchTemplate(im, template, cv2.TM_SQDIFF)
displ = np.unravel_index(np.argmin(result), result.shape)
return (displ[1], displ[0])
def block_matching(im1, im2, block_size=16, max_motion=16):
search_area = 2 * max_motion + block_size
block_rows = int(im1.shape[0] / block_size)
block_cols = int(im1.shape[1] / block_size)
# Add extra row / column with the remainder pixels, when large enough
if im1.shape[0] % block_size >= 8:
block_rows += 1
if im1.shape[1] % block_size >= 8:
block_cols += 1
result = np.zeros((im1.shape[0], im1.shape[1], 3), dtype='int16')
for i in range(block_rows):
for j in range(block_cols):
x1 = j * block_size
y1 = i * block_size
xa = x1 - max_motion
ya = y1 - max_motion
patch = im1[y1:y1 + block_size, x1:x1 + block_size]
area = im2[max(ya, 0):(ya + search_area),
max(xa, 0):(xa + search_area)]
x2, y2 = patch_correlation(patch, area)
motion = (max(xa, 0) + x2 - x1, max(ya, 0) + y2 - y1, 1)
result[y1 : y1 + block_size, x1 : x1 + block_size] = motion
return result
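# Illustrative call (hypothetical frames): dense block-wise motion between two
# consecutive grayscale frames, plotted with one arrow per 16x16 block.
#   flow = block_matching(frame0, frame1, block_size=16, max_motion=16)
#   plot_map(frame0, flow, size=16)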
def block_matching_sequence(seq, block_size=16, max_motion=16):
n, h, w, _ = seq.shape
result = np.empty((n - 1, h, w, 3), dtype='int16')
for i in range(seq.shape[0] - 1):
result[i] = block_matching(seq[i], seq[i+1], block_size=block_size,
max_motion=max_motion)
return result
def lucas_kanade(im1, im2, wsize, track_specs={}):
track_points = np.mgrid[0:im1.shape[0],
0:im1.shape[1]].swapaxes(0, 2).swapaxes(0, 1).reshape(
(im1.shape[0] * im1.shape[1], 2))
track_points = np.flip(track_points, axis=1)
track_points = track_points.astype(np.float32)
out_points, __, __ = cv2.calcOpticalFlowPyrLK(im1, im2, track_points, None,
winSize=wsize, maxLevel=0,
criteria=
(cv2.TERM_CRITERIA_EPS |
cv2.TERM_CRITERIA_COUNT,
10, 0.03))
track_points = np.flip(track_points, axis=1)
out_points = np.flip(out_points, axis=1)
track_points = track_points.astype(int)
out_points = out_points.astype(int)
shap = list(im1.shape)
shap = shap + [3]
shap = tuple(shap)
flow = np.zeros(shap)
for a, b in zip(track_points, out_points):
bf = np.flip(b, axis=0)
af = np.flip(a, axis=0)
flow[a[0], a[1], 0:2] = bf - af
flow[a[0], a[1], 2] = 1
return flow
def lk_sequence(seq, wsize, track_specs={}):
n, h, w, _ = seq.shape
seq = seq[:, :, :, 0]
result = np.empty((n, h, w, 3), dtype='int16')
for i in range(seq.shape[0] - 1):
result[i] = lucas_kanade(seq[i], seq[i+1], wsize, track_specs)
return result
def lucas_kanade_pyr(im1, im2, wsize, levels, track_specs={}):
track_points = np.mgrid[0:im1.shape[0],
0:im1.shape[1]].swapaxes(0, 2).swapaxes(0, 1).reshape(
(im1.shape[0] * im1.shape[1], 2))
track_points = np.flip(track_points, axis=1)
track_points = track_points.astype(np.float32)
out_points, __, __ = cv2.calcOpticalFlowPyrLK(im1, im2, track_points, None,
winSize=wsize,
maxLevel=levels,
criteria=
(cv2.TERM_CRITERIA_EPS |
cv2.TERM_CRITERIA_COUNT,
10, 0.03))
track_points = np.flip(track_points, axis=1)
    out_points = np.flip(out_points, axis=1)
    track_points = track_points.astype(int)
    out_points = out_points.astype(int)
    shap = tuple(list(im1.shape) + [3])
    flow = np.zeros(shap)
    for a, b in zip(track_points, out_points):
        flow[a[0], a[1], 0:2] = np.flip(b, axis=0) - np.flip(a, axis=0)
        flow[a[0], a[1], 2] = 1
    return flow
# Renishaw wdf Raman spectroscopy file reader
# Code inspired by Henderson, Alex DOI:10.5281/zenodo.495477
from __future__ import print_function
import struct
import numpy
import io
from .types import LenType, DataType, MeasurementType
from .types import ScanType, UnitType, DataType
from .types import Offsets, ExifTags
from .utils import convert_wl, convert_attr_name
from sys import stderr
try:
import PIL
from PIL import Image
from PIL.TiffImagePlugin import IFDRational
except ImportError:
PIL = None
class WDFReader(object):
"""Reader for Renishaw(TM) WiRE Raman spectroscopy files (.wdf format)
The wdf file format is separated into several DataBlocks, with starting 4-char
strings such as (incomplete list):
`WDF1`: File header for information
`DATA`: Spectra data
`XLST`: Data for X-axis of data, usually the Raman shift or wavelength
`YLST`: Data for Y-axis of data, possibly not important
`WMAP`: Information for mapping, e.g. StreamLine or StreamLineHR mapping
`MAP `: Mapping information(?)
`ORGN`: Data for stage origin
`TEXT`: Annotation text etc
`WXDA`: ? TODO
`WXDM`: ? TODO
`ZLDC`: ? TODO
`BKXL`: ? TODO
`WXCS`: ? TODO
`WXIS`: ? TODO
    `WHTL`: White light image
Following the block name, there are two indicators:
Block uid: int32
Block size: int64
Args:
file_name (file) : File object for the wdf file
Attributes:
title (str) : Title of measurement
username (str) : Username
application_name (str) : Default WiRE
application_version (int,) * 4 : Version number, e.g. [4, 4, 0, 6602]
measurement_type (int) : Type of measurement
0=unknown, 1=single, 2=multi, 3=mapping
scan_type (int) : Scan of type, see values in scan_types
laser_wavenumber (float32) : Wavenumber in cm^-1
        count (int) : Number of experiments (same type); can be smaller than capacity
spectral_units (int) : Unit of spectra, see unit_types
xlist_type (int) : See unit_types
xlist_unit (int) : See unit_types
xlist_length (int): Size for the xlist
xdata (numpy.array): x-axis data
ylist_type (int): Same as xlist_type
ylist_unit (int): Same as xlist_unit
ylist_length (int): Same as xlist_length
ydata (numpy.array): y-data, possibly not used
point_per_spectrum (int): Should be identical to xlist_length
data_origin_count (int) : Number of rows in data origin list
capacity (int) : Max number of spectra
accumulation_count (int) : Single or multiple measurements
block_info (dict) : Info block at least with following keys
DATA, XLST, YLST, ORGN
# TODO types?
"""
def __init__(self, file_name, debug=False):
try:
self.file_obj = open(str(file_name), "rb")
except IOError:
raise IOError("File {0} does noe exist!".format(file_name))
# Initialize the properties for the wdfReader class
self.title = ""
self.username = ""
self.measurement_type = None
self.scan_type = None
self.laser_length = None
self.count = None
self.spectral_unit = None
self.xlist_type = None
self.xlist_unit = None
self.ylist_type = None
self.ylist_unit = None
self.point_per_spectrum = None
self.data_origin_count = None
self.capacity = None
self.application_name = ""
self.application_version = [None]*4
self.xlist_length = 0
self.ylist_length = 0
self.accumulation_count = None
self.block_info = {} # each key has value (uid, offset, size)
self.is_completed = False
self.debug = debug
# Parse the header section in the wdf file
self.__locate_all_blocks()
# Parse individual blocks
self.__treat_block_data("WDF1")
self.__treat_block_data("DATA")
self.__treat_block_data("XLST")
self.__treat_block_data("YLST")
self.__treat_block_data("ORGN")
self.__treat_block_data("WMAP")
self.__treat_block_data("WHTL")
# Reshape spectra after reading mapping information
self.__reshape_spectra()
# self._parse_wmap()
# Finally print the information
if self.debug:
print(("File Metadata").center(80, "="),
file=stderr)
self.print_info(file=stderr)
print("=" * 80, file=stderr)
def close(self):
self.file_obj.close()
if hasattr(self, "img"):
self.img.close()
def __get_type_string(self, attr, data_type):
"""Get the enumerated-data_type as string
"""
val = getattr(self, attr) # No error checking
if data_type is None:
return val
else:
return data_type(val).name
def __read_type(self, type, size=1):
""" Unpack struct data for certain type
"""
if type in ["int16", "int32", "int64", "float", "double"]:
if size > 1:
                raise NotImplementedError(
                    "Reading number types with size > 1 is not supported")
# unpack into unsigned values
fmt_out = LenType["s_" + type].value
fmt_in = LenType["l_" + type].value
return struct.unpack(fmt_out, self.file_obj.read(fmt_in * size))[0]
elif type == "utf8":
# Read utf8 string with determined size block
return self.file_obj.read(size).decode("utf8").replace("\x00", "")
else:
raise ValueError("Unknown data length format!")
def __locate_single_block(self, pos):
"""Get block information starting at pos
"""
self.file_obj.seek(pos)
block_name = self.file_obj.read(0x4).decode("ascii")
if len(block_name) < 4:
raise EOFError
block_uid = self.__read_type("int32")
block_size = self.__read_type("int64")
return block_name, block_uid, block_size
def __locate_all_blocks(self):
"""Get information for all data blocks and store them inside self.block_info
"""
curpos = 0
finished = False
while not finished:
try:
block_name, block_uid, block_size = self.__locate_single_block(
curpos)
self.block_info[block_name] = (block_uid, curpos, block_size)
curpos += block_size
except (EOFError, UnicodeDecodeError):
finished = True
def __treat_block_data(self, block_name):
"""Get data according to specific block name
"""
if block_name not in self.block_info.keys():
if self.debug:
print("Block name {0} not present in current measurement".
format(block_name), file=stderr)
return
# parse individual blocks with names
actions = {
"WDF1": ("_parse_header", ()),
"DATA": ("_parse_spectra", ()),
"XLST": ("_parse_xylist", ("X")),
"YLST": ("_parse_xylist", ("Y")),
"ORGN": ("_parse_orgin_list", ()),
"WMAP": ("_parse_wmap", ()),
"WHTL": ("_parse_img", ()),
}
func_name, val = actions[block_name]
getattr(self, func_name)(*val)
# The method for reading the info in the file header
def _parse_header(self):
"""Solve block WDF1
"""
self.file_obj.seek(0) # return to the head
# Must make the conversion under python3
block_ID = self.file_obj.read(Offsets.block_id).decode("ascii")
block_UID = self.__read_type("int32")
block_len = self.__read_type("int64")
# First block must be "WDF1"
if (block_ID != "WDF1") \
or (block_UID != 0 and block_UID != 1) \
or (block_len != Offsets.data_block):
raise ValueError("The wdf file format is incorrect!")
# TODO what are the digits in between?
# The keys from the header
self.file_obj.seek(Offsets.measurement_info) # space
self.point_per_spectrum = self.__read_type("int32")
self.capacity = self.__read_type("int64")
self.count = self.__read_type("int64")
# If count < capacity, this measurement is not completed
self.is_completed = (self.count == self.capacity)
self.accumulation_count = self.__read_type("int32")
self.ylist_length = self.__read_type("int32")
self.xlist_length = self.__read_type("int32")
self.data_origin_count = self.__read_type("int32")
self.application_name = self.__read_type("utf8", 24) # Must be "WiRE"
for i in range(4):
self.application_version[i] = self.__read_type("int16")
self.scan_type = ScanType(self.__read_type("int32"))
self.measurement_type = MeasurementType(self.__read_type("int32"))
# For the units
self.file_obj.seek(Offsets.spectral_info)
self.spectral_unit = UnitType(self.__read_type("int32"))
self.laser_length = convert_wl(self.__read_type("float")) # in nm
# Username and title
self.file_obj.seek(Offsets.file_info)
self.username = self.__read_type("utf8",
Offsets.usr_name -
Offsets.file_info)
self.title = self.__read_type("utf8",
Offsets.data_block -
Offsets.usr_name)
def _parse_xylist(self, dir):
"""Get information from XLST or YLST blocks
"""
if not dir.upper() in ["X", "Y"]:
raise ValueError("Direction argument `dir` must be X or Y!")
name = dir.upper() + "LST"
uid, pos, size = self.block_info[name]
offset = Offsets.block_data
self.file_obj.seek(pos + offset)
setattr(self, "{0}list_type".format(dir.lower()),
DataType(self.__read_type("int32")))
setattr(self, "{0}list_unit".format(dir.lower()),
UnitType(self.__read_type("int32")))
size = getattr(self, "{0}list_length".format(dir.lower()))
if size == 0: # Possibly not started
raise ValueError("{0}-List possibly not initialized!".
format(dir.upper()))
# self.file_obj.seek(pos + offset)
data = numpy.fromfile(self.file_obj, dtype="float32", count=size)
setattr(self, "{0}data".format(dir.lower()), data)
return
def _parse_spectra(self, start=0, end=-1):
"""Get information from DATA block
"""
if end == -1: # take all spectra
end = self.count - 1
if (start not in range(self.count)) or (end not in range(self.count)):
raise ValueError("Wrong start and end indices of spectra!")
if start > end:
raise ValueError("Start cannot be larger than end!")
# Determine start position
uid, pos, size = self.block_info["DATA"]
pos_start = pos + Offsets.block_data + LenType["l_float"].value * \
start * self.point_per_spectrum
n_row = end - start + 1
self.file_obj.seek(pos_start)
spectra_data = numpy.fromfile(
self.file_obj, dtype="float32",
count=n_row * self.point_per_spectrum)
# if len(spectra_data.shape) > 1:
# The spectra is only 1D array
# spectra_data = spectra_data.reshape(
# n_row, spectra_data.size // n_row)
self.spectra = spectra_data
return
    def _parse_origin_list(self):
"""Get information from OriginList
Set the following attributes:
`self.origin_list_header`: 2D-array
`self.origin_list`: origin list
"""
# First confirm origin list type
uid, pos, size = self.block_info["ORGN"]
self.origin_list_header = [[None, ] * 5
for i in range(self.data_origin_count)]
# All possible to have x y and z positions!
self.xpos = numpy.zeros(self.count)
self.ypos = numpy.zeros(self.count)
self.zpos = numpy.zeros(self.count)
list_increment = Offsets.origin_increment + \
LenType.l_double.value * self.capacity
curpos = pos + Offsets.origin_info
for i in range(self.data_origin_count):
self.file_obj.seek(curpos)
p1 = self.__read_type("int32")
p2 = self.__read_type("int32")
s = self.__read_type("utf8", 0x10)
# First index: is the list x, or y pos?
self.origin_list_header[i][0] = (p1 >> 31 & 0b1) == 1
# Second: Data type of the row
self.origin_list_header[i][1] = DataType(p1 & ~(0b1 << 31))
# Third: Unit
self.origin_list_header[i][2] = UnitType(p2)
# Fourth: annotation
self.origin_list_header[i][3] = s
# Last: the actual data
# array = numpy.empty(self.count)
# Time appears to be recorded as int64 in 100 nanosecond intervals
# Possibly using the .NET DateTime epoch
# Reference does not appear to be Unix Epoch time
# Set time[0] = 0 until timestamp reference can be determined
# Resulting array will have unit of `FileTime` in seconds
if self.origin_list_header[i][1] == DataType.Time:
array = numpy.array([self.__read_type("int64")
for i in range(self.count)]) / 1e7
array = array - array[0]
else:
array = numpy.array([self.__read_type("double")
for i in range(self.count)])
self.origin_list_header[i][4] = array
# Set self.xpos or self.ypos
if self.origin_list_header[i][1] == DataType.Spatial_X:
self.xpos = array
self.xpos_unit = self.origin_list_header[i][2]
elif self.origin_list_header[i][1] == DataType.Spatial_Y:
self.ypos = array
self.ypos_unit = self.origin_list_header[i][2]
elif self.origin_list_header[i][1] == DataType.Spatial_Z:
self.zpos = array
self.zpos_unit = self.origin_list_header[i][2]
else:
pass
curpos += list_increment
def _parse_wmap(self):
"""Get information about mapping in StreamLine and StreamLineHR
"""
try:
uid, pos, size = self.block_info["WMAP"]
except KeyError:
if self.debug:
print(("Current measurement does not"
" contain mapping information!"),
file=stderr)
return
self.file_obj.seek(pos + Offsets.wmap_origin)
x_start = self.__read_type("float")
if not numpy.isclose(x_start, self.xpos[0], rtol=1e-4):
raise ValueError("WMAP Xpos is not same as in ORGN!")
y_start = self.__read_type("float")
if not numpy.isclose(y_start, self.ypos[0], rtol=1e-4):
raise ValueError("WMAP Ypos is not same as in ORGN!")
unknown1 = self.__read_type("float")
x_pad = self.__read_type("float")
y_pad = self.__read_type("float")
unknown2 = self.__read_type("float")
spectra_w = self.__read_type("int32")
spectra_h = self.__read_type("int32")
# Determine if the xy-grid spacing is same as in x_pad and y_pad
if (len(self.xpos) > 1) and (len(self.ypos) > 1):
xdist = numpy.abs(self.xpos - self.xpos[0])
ydist = numpy.abs(self.ypos - self.ypos[0])
xdist = xdist[numpy.nonzero(xdist)]
ydist = ydist[numpy.nonzero(ydist)]
# Get minimal non-zero padding in the grid
try:
x_pad_grid = numpy.min(xdist)
except ValueError:
x_pad_grid = 0
try:
                y_pad_grid = numpy.min(ydist)
            except ValueError:
                y_pad_grid = 0
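# Minimal usage sketch (assumes a local file "sample.wdf"; attribute names are
# the ones defined by the class above):
# reader = WDFReader("sample.wdf", debug=True)
# print(reader.title, reader.xdata.shape, reader.spectra.shape)
# reader.close()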
import random
import math
from numpy.linalg import inv, det
import matplotlib.pyplot as plt
import numpy as np
from kalman_filter.assignment2.main import update_step, prediction_step
from kalman_filter.assignment2.utils import SAC, TA_AC, NESS, TA_NIS, NIS, plot_ness, plot_nis
from kalman_filter.assignment3.NavigationLab3_Files.generate_data_2D_fun import generate_data_2D_fun
from kalman_filter.statsUtils import gauss_pdf
number_of_samples = 1507
from scipy.stats import norm
from sympy import Symbol, symbols, Matrix, sin, cos, sqrt
from sympy import init_printing
from sympy.utilities.codegen import codegen
init_printing(use_latex=True)
def update_step_extended(x_hat, P_hat, Z, C, R):
"""
Computes the posterior mean X and covariance P of the system state given a new measurement at time step k
This is the measurement update phase or the corrector
:param x_hat: predicted mean(x_hat)
:param P_hat: predicted covariance (P_hat)
:param Z: measurement vector
:param C: measurement matrix
:param R: covariance matrix
:return:
"""
IM = np.dot(C, x_hat) # the Mean of predictive distribution of Y
IS = R + np.dot(C, np.dot(P_hat, C.T)) # the Covariance or predictive mean of Y
K = np.dot(P_hat, np.dot(C.T, inv(IS))) # Kalman Gain matrix
# h[k,0]=np.sqrt(np.dot(xhatminus[k, (0,2)].T, xhatminus[k, (0,2)]))
# h[k,1]=np.arctan2(xhatminus[k, 1], xhatminus[k, 0])
h1 = np.sqrt(np.dot(x_hat[[0, 2]].T, x_hat[[0, 2]]).astype(float))
h2 = np.arctan2(x_hat[1].astype(float), x_hat[0].astype(float))
h = np.array([h1, h2]).astype(float)
X = x_hat + np.dot(K, (Z - h.T).T)
    P = np.dot((np.identity(4) - np.dot(K, C)), P_hat)  # posterior covariance
    return X, P
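# Example measurement update (hypothetical values): a range/bearing observation
# of a 4-state [x, vx, y, vy] system, where C is the Jacobian of h at x_hat.
# x_hat = np.array([[10.0], [1.0], [5.0], [0.5]])
# P_hat = np.eye(4)
# Z = np.array([[11.18, 0.46]])
# C = np.array([[0.89, 0.0, 0.45, 0.0],
#               [-0.04, 0.0, 0.08, 0.0]])
# R = np.diag([0.1, 0.01])
# X, P = update_step_extended(x_hat, P_hat, Z, C, R)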
import os
import random
import sys
from argparse import ArgumentParser, Namespace
from collections import deque
from datetime import datetime
from pathlib import Path
from pprint import pprint
import numpy as np
import psutil
from flatland.envs.malfunction_generators import (MalfunctionParameters,
malfunction_from_params)
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv, RailEnvActions
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
from torch.utils.tensorboard import SummaryWriter
from utils.agent_action_config import (get_action_size,
get_flatland_full_action_size,
map_action, map_action_policy,
map_actions, set_action_size_full,
set_action_size_reduced)
from utils.fast_tree_obs import FastTreeObs
from utils.observation_utils import normalize_observation
from utils.timer import Timer
# ! Import our policies
from random_policy import RandomPolicy
from go_forward_policy import GoForwardPolicy
from dddqn import DDDQNPolicy
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
try:
import wandb
wandb.init(sync_tensorboard=True)
except ImportError:
print("Install wandb to log to Weights & Biases")
"""
This file shows how to train multiple agents using a reinforcement learning approach.
After training an agent, you can submit it straight away to the NeurIPS 2020 Flatland challenge!
Agent documentation: https://flatland.aicrowd.com/getting-started/rl/multi-agent.html
Submission documentation: https://flatland.aicrowd.com/getting-started/first-submission.html
"""
def create_rail_env(env_params, tree_observation):
n_agents = env_params.n_agents
x_dim = env_params.x_dim
y_dim = env_params.y_dim
n_cities = env_params.n_cities
max_rails_between_cities = env_params.max_rails_between_cities
max_rails_in_city = env_params.max_rails_in_city
seed = env_params.seed
# Break agents from time to time
malfunction_parameters = MalfunctionParameters(
malfunction_rate=env_params.malfunction_rate,
min_duration=20,
max_duration=50
)
return RailEnv(
width=x_dim, height=y_dim,
rail_generator=sparse_rail_generator(
max_num_cities=n_cities,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_city
),
schedule_generator=sparse_schedule_generator(),
number_of_agents=n_agents,
malfunction_generator_and_process_data=malfunction_from_params(
malfunction_parameters),
obs_builder_object=tree_observation,
random_seed=seed
)
def train_agent(train_params, train_env_params, eval_env_params, obs_params):
# Environment parameters
n_agents = train_env_params.n_agents
x_dim = train_env_params.x_dim
y_dim = train_env_params.y_dim
n_cities = train_env_params.n_cities
max_rails_between_cities = train_env_params.max_rails_between_cities
max_rails_in_city = train_env_params.max_rails_in_city
seed = train_env_params.seed
# Unique ID for this training
now = datetime.now()
training_id = now.strftime('%y%m%d%H%M%S')
# Observation parameters
observation_tree_depth = obs_params.observation_tree_depth
observation_radius = obs_params.observation_radius
observation_max_path_depth = obs_params.observation_max_path_depth
# Training parameters
eps_start = train_params.eps_start
eps_end = train_params.eps_end
eps_decay = train_params.eps_decay
n_episodes = train_params.n_episodes
checkpoint_interval = train_params.checkpoint_interval
n_eval_episodes = train_params.n_evaluation_episodes
restore_replay_buffer = train_params.restore_replay_buffer
save_replay_buffer = train_params.save_replay_buffer
# Set the seeds
random.seed(seed)
np.random.seed(seed)
# Observation builder
predictor = ShortestPathPredictorForRailEnv(observation_max_path_depth)
if not train_params.use_fast_tree_observation:
print("\nUsing standard TreeObs")
def check_is_observation_valid(observation):
return observation
def get_normalized_observation(observation, tree_depth: int, observation_radius=0):
return normalize_observation(observation, tree_depth, observation_radius)
tree_observation = TreeObsForRailEnv(
max_depth=observation_tree_depth, predictor=predictor)
tree_observation.check_is_observation_valid = check_is_observation_valid
tree_observation.get_normalized_observation = get_normalized_observation
else:
print("\nUsing FastTreeObs")
def check_is_observation_valid(observation):
return True
def get_normalized_observation(observation, tree_depth: int, observation_radius=0):
return observation
tree_observation = FastTreeObs(max_depth=observation_tree_depth)
tree_observation.check_is_observation_valid = check_is_observation_valid
tree_observation.get_normalized_observation = get_normalized_observation
# Setup the environments
train_env = create_rail_env(train_env_params, tree_observation)
train_env.reset(regenerate_schedule=True, regenerate_rail=True)
eval_env = create_rail_env(eval_env_params, tree_observation)
eval_env.reset(regenerate_schedule=True, regenerate_rail=True)
if not train_params.use_fast_tree_observation:
# Calculate the state size given the depth of the tree observation and the number of features
n_features_per_node = train_env.obs_builder.observation_dim
n_nodes = sum([np.power(4, i)
for i in range(observation_tree_depth + 1)])
state_size = n_features_per_node * n_nodes
else:
# Calculate the state size given the depth of the tree observation and the number of features
state_size = tree_observation.observation_dim
action_count = [0] * get_flatland_full_action_size()
action_dict = dict()
agent_obs = [None] * n_agents
agent_prev_obs = [None] * n_agents
agent_prev_action = [2] * n_agents
update_values = [False] * n_agents
# Smoothed values used as target for hyperparameter tuning
smoothed_eval_normalized_score = -1.0
smoothed_eval_completion = 0.0
# todo smooth when rendering instead
scores_window = deque(maxlen=checkpoint_interval)
completion_window = deque(maxlen=checkpoint_interval)
if train_params.action_size == "reduced":
set_action_size_reduced()
else:
set_action_size_full()
# ! Add Policies here
if train_params.policy == "Random":
policy = RandomPolicy(state_size, get_action_size(), train_params)
elif train_params.policy == "GoForward":
policy = GoForwardPolicy(state_size, get_action_size(), train_params)
elif train_params.policy == "dddqn":
policy = DDDQNPolicy(state_size, get_action_size(), train_params)
# Default policy random
if train_params.policy is None:
policy = GoForwardPolicy(state_size, get_action_size(), train_params)
# Load existing policy
if train_params.load_policy != "":
policy.load(train_params.load_policy)
# Loads existing replay buffer
if restore_replay_buffer:
try:
policy.load_replay_buffer(restore_replay_buffer)
policy.test()
except RuntimeError as e:
print(
"\n🛑 Could't load replay buffer, were the experiences generated using the same tree depth?")
print(e)
exit(1)
print("\n💾 Replay buffer status: {}/{} experiences".format(
len(policy.memory.memory), train_params.buffer_size))
hdd = psutil.disk_usage('/')
if save_replay_buffer and (hdd.free / (2 ** 30)) < 500.0:
print(
"⚠️ Careful! Saving replay buffers will quickly consume a lot of disk space. You have {:.2f}gb left.".format(
hdd.free / (2 ** 30)))
# TensorBoard writer
writer = SummaryWriter(
comment="_" + train_params.policy + "_" + train_params.action_size)
training_timer = Timer()
training_timer.start()
print(
"\n🚉 Training {} trains on {}x{} grid for {} episodes, evaluating on {} episodes every {} episodes. Training id '{}'.\n".format(
train_env.get_num_agents(),
x_dim, y_dim,
n_episodes,
n_eval_episodes,
checkpoint_interval,
training_id
))
for episode_idx in range(n_episodes + 1):
step_timer = Timer()
reset_timer = Timer()
learn_timer = Timer()
preproc_timer = Timer()
inference_timer = Timer()
# Reset environment
reset_timer.start()
if train_params.n_agent_fixed:
number_of_agents = n_agents
train_env_params.n_agents = n_agents
else:
number_of_agents = int(
min(n_agents, 1 + np.floor(episode_idx / 5))) # ! Changed from 200
train_env_params.n_agents = episode_idx % number_of_agents + 1
train_env = create_rail_env(train_env_params, tree_observation)
obs, info = train_env.reset(
regenerate_rail=True, regenerate_schedule=True)
policy.reset(train_env)
reset_timer.end()
if train_params.render:
# Setup renderer
env_renderer = RenderTool(train_env, gl="PGL")
env_renderer.set_new_rail()
score = 0
nb_steps = 0
actions_taken = []
# Build initial agent-specific observations
for agent_handle in train_env.get_agent_handles():
if tree_observation.check_is_observation_valid(obs[agent_handle]):
agent_obs[agent_handle] = tree_observation.get_normalized_observation(obs[agent_handle],
observation_tree_depth,
observation_radius=observation_radius)
agent_prev_obs[agent_handle] = agent_obs[agent_handle].copy()
# Max number of steps per episode
# This is the official formula used during evaluations
# See details in flatland.envs.schedule_generators.sparse_schedule_generator
# max_steps = int(4 * 2 * (env.height + env.width + (n_agents / n_cities)))
max_steps = train_env._max_episode_steps
# Run episode
policy.start_episode(train=True)
for step in range(max_steps - 1):
inference_timer.start()
policy.start_step(train=True)
for agent_handle in train_env.get_agent_handles():
agent = train_env.agents[agent_handle]
if info['action_required'][agent_handle]:
update_values[agent_handle] = True
action = policy.act(
agent_handle, agent_obs[agent_handle], eps=eps_start)
action_count[map_action(action)] += 1
actions_taken.append(map_action(action))
else:
                    # An action is not required if the train hasn't joined the railway network,
                    # if it has already reached its target, or if it is currently malfunctioning.
update_values[agent_handle] = False
action = 0
action_dict.update({agent_handle: action})
policy.end_step(train=True)
inference_timer.end()
# Environment step
step_timer.start()
next_obs, all_rewards, done, info = train_env.step(
map_actions(action_dict))
step_timer.end()
# Render an episode at some interval
if train_params.render:
env_renderer.render_env(
show=True,
frames=False,
show_observations=False,
show_predictions=False
)
# Update replay buffer and train agent
for agent_handle in train_env.get_agent_handles():
if update_values[agent_handle] or done['__all__']:
                    # Only learn from timesteps where something happened
learn_timer.start()
policy.step(agent_handle,
agent_prev_obs[agent_handle],
map_action_policy(
agent_prev_action[agent_handle]),
all_rewards[agent_handle],
agent_obs[agent_handle],
done[agent_handle])
learn_timer.end()
agent_prev_obs[agent_handle] = agent_obs[agent_handle].copy()
agent_prev_action[agent_handle] = action_dict[agent_handle]
# Preprocess the new observations
if tree_observation.check_is_observation_valid(next_obs[agent_handle]):
preproc_timer.start()
agent_obs[agent_handle] = tree_observation.get_normalized_observation(next_obs[agent_handle],
observation_tree_depth,
observation_radius=observation_radius)
preproc_timer.end()
score += all_rewards[agent_handle]
nb_steps = step
if done['__all__']:
break
policy.end_episode(train=True)
# Epsilon decay
eps_start = max(eps_end, eps_decay * eps_start)
# Collect information about training
tasks_finished = sum(done[idx]
for idx in train_env.get_agent_handles())
completion = tasks_finished / max(1, train_env.get_num_agents())
normalized_score = score / (max_steps * train_env.get_num_agents())
action_probs = action_count / max(1, np.sum(action_count))
scores_window.append(normalized_score)
completion_window.append(completion)
smoothed_normalized_score = np.mean(scores_window)
smoothed_completion = np.mean(completion_window)
if train_params.render:
env_renderer.close_window()
# Print logs
if episode_idx % checkpoint_interval == 0 and episode_idx > 0:
policy.save('./checkpoints/' + training_id +
'-' + str(episode_idx) + '.pth')
if save_replay_buffer:
policy.save_replay_buffer(
'./replay_buffers/' + training_id + '-' + str(episode_idx) + '.pkl')
# reset action count
action_count = [0] * get_flatland_full_action_size()
print(
'\r🚂 Episode {}'
'\t 🚉 nAgents {:2}/{:2}'
' 🏆 Score: {:7.3f}'
' Avg: {:7.3f}'
'\t 💯 Done: {:6.2f}%'
' Avg: {:6.2f}%'
'\t 🎲 Epsilon: {:.3f} '
'\t 🔀 Action Probs: {}'.format(
episode_idx,
train_env_params.n_agents, number_of_agents,
normalized_score,
smoothed_normalized_score,
100 * completion,
100 * smoothed_completion,
eps_start,
format_action_prob(action_probs)
), end=" ")
# Evaluate policy and log results at some interval
if episode_idx % checkpoint_interval == 0 and n_eval_episodes > 0:
scores, completions, nb_steps_eval = eval_policy(eval_env,
tree_observation,
policy,
train_params,
obs_params,
episode_idx)
writer.add_scalar("evaluation/scores_min",
np.min(scores), episode_idx)
writer.add_scalar("evaluation/scores_max",
np.max(scores), episode_idx)
writer.add_scalar("evaluation/scores_mean",
np.mean(scores), episode_idx)
writer.add_scalar("evaluation/scores_std",
np.std(scores), episode_idx)
writer.add_histogram("evaluation/scores",
np.array(scores), episode_idx)
writer.add_scalar("evaluation/completions_min",
np.min(completions), episode_idx)
writer.add_scalar("evaluation/completions_max",
np.max(completions), episode_idx)
writer.add_scalar("evaluation/completions_mean",
np.mean(completions), episode_idx)
writer.add_scalar("evaluation/completions_std",
np.std(completions), episode_idx)
writer.add_histogram("evaluation/completions",
np.array(completions), episode_idx)
writer.add_scalar("evaluation/nb_steps_min",
np.min(nb_steps_eval), episode_idx)
writer.add_scalar("evaluation/nb_steps_max",
np.max(nb_steps_eval), episode_idx)
writer.add_scalar("evaluation/nb_steps_mean",
np.mean(nb_steps_eval), episode_idx)
writer.add_scalar("evaluation/nb_steps_std",
np.std(nb_steps_eval), episode_idx)
writer.add_histogram("evaluation/nb_steps",
np.array(nb_steps_eval), episode_idx)
smoothing = 0.9
smoothed_eval_normalized_score = smoothed_eval_normalized_score * smoothing + np.mean(scores) * (
1.0 - smoothing)
smoothed_eval_completion = smoothed_eval_completion * \
smoothing + np.mean(completions) * (1.0 - smoothing)
writer.add_scalar("evaluation/smoothed_score",
smoothed_eval_normalized_score, episode_idx)
writer.add_scalar("evaluation/smoothed_completion",
smoothed_eval_completion, episode_idx)
# Save logs to tensorboard
writer.add_scalar("training/score", normalized_score, episode_idx)
writer.add_scalar("training/smoothed_score",
smoothed_normalized_score, episode_idx)
writer.add_scalar("training/completion",
np.mean(completion), episode_idx)
writer.add_scalar("training/smoothed_completion",
np.mean(smoothed_completion), episode_idx)
writer.add_scalar("training/nb_steps", nb_steps, episode_idx)
writer.add_scalar("training/n_agents",
train_env_params.n_agents, episode_idx)
writer.add_histogram("actions/distribution",
np.array(actions_taken), episode_idx)
writer.add_scalar("actions/nothing",
action_probs[RailEnvActions.DO_NOTHING], episode_idx)
writer.add_scalar(
"actions/left", action_probs[RailEnvActions.MOVE_LEFT], episode_idx)
writer.add_scalar(
"actions/forward", action_probs[RailEnvActions.MOVE_FORWARD], episode_idx)
writer.add_scalar(
"actions/right", action_probs[RailEnvActions.MOVE_RIGHT], episode_idx)
writer.add_scalar(
"actions/stop", action_probs[RailEnvActions.STOP_MOVING], episode_idx)
writer.add_scalar("training/epsilon", eps_start, episode_idx)
writer.add_scalar("training/buffer_size",
len(policy.memory), episode_idx)
writer.add_scalar("training/loss", policy.loss, episode_idx)
writer.add_scalar("timer/reset", reset_timer.get(), episode_idx)
writer.add_scalar("timer/step", step_timer.get(), episode_idx)
writer.add_scalar("timer/learn", learn_timer.get(), episode_idx)
writer.add_scalar("timer/preproc", preproc_timer.get(), episode_idx)
writer.add_scalar(
"timer/total", training_timer.get_current(), episode_idx)
writer.flush()
def format_action_prob(action_probs):
action_probs = np.round(action_probs, 3)
actions = ["↻", "←", "↑", "→", "◼"]
buffer = ""
for action, action_prob in zip(actions, action_probs):
buffer += action + " " + "{:.3f}".format(action_prob) + " "
return buffer
def eval_policy(env, tree_observation, policy, train_params, obs_params, eps):
print(eps)
n_eval_episodes = train_params.n_evaluation_episodes
# max_steps = 50
max_steps = env._max_episode_steps
tree_depth = obs_params.observation_tree_depth
observation_radius = obs_params.observation_radius
print(max_steps)
action_dict = dict()
scores = []
completions = []
nb_steps = []
prev_obs = [None] * env.get_num_agents()
for episode_idx in range(n_eval_episodes):
agent_obs = [None] * env.get_num_agents()
score = 0.0
obs, info = env.reset(regenerate_rail=True, regenerate_schedule=True)
policy.reset(env)
final_step = 0
# Build initial obs
for agent in env.get_agent_handles():
if obs[agent] is not None:
agent_obs[agent] = obs[agent]
prev_obs[agent] = obs[agent]
if episode_idx % 2 == 0:
env_renderer = RenderTool(env, gl="PGL")
env_renderer.set_new_rail()
policy.start_episode(train=False)
for step in range(max_steps - 1):
policy.start_step(train=False)
# print(sum(x is None for x in prev_obs))
for agent in env.get_agent_handles():
if obs[agent] is not None:
prev_obs[agent] = obs[agent]
agent_obs[agent] = tree_observation.get_normalized_observation(obs[agent], tree_depth=tree_depth, observation_radius=observation_radius)
if obs[agent] is None:
# print(f"{agent} has NONE %%%%%%%%%%%%%%")
agent_obs[agent] = tree_observation.get_normalized_observation(prev_obs[agent], tree_depth=tree_depth, observation_radius=observation_radius)
action = 0
if info['action_required'][agent]:
action = policy.act(agent, agent_obs[agent], eps=0.0)
action_dict.update({agent: action})
policy.end_step(train=False)
            obs, all_rewards, done, info = env.step(map_actions(action_dict))
# print(action_dict)
if episode_idx % 2 == 0:
env_renderer.render_env(
show=True,
frames=False,
show_observations=False,
show_predictions=True
)
# time.sleep(2)
for agent in env.get_agent_handles():
score += all_rewards[agent]
final_step = step
if done['__all__']:
break
policy.end_episode(train=False)
normalized_score = score / (max_steps * env.get_num_agents())
scores.append(normalized_score)
tasks_finished = sum(done[idx] for idx in env.get_agent_handles())
completion = tasks_finished / max(1, env.get_num_agents())
completions.append(completion)
nb_steps.append(final_step)
if episode_idx % 2 == 0:
env_renderer.close_window()
print(" ✅ Eval: score {:.3f} done {:.1f}%".format(
        np.mean(scores), 100.0 * np.mean(completions)))
    return scores, completions, nb_steps
# A python implementation of Ailey's matlab tensor code.
import os
import numpy as np
import math
import SimpleITK as sitk
from scipy import ndimage
import nibabel as nib
from PIL import Image
import scipy.misc
from scipy import signal
import warnings
warnings.filterwarnings("ignore")
def doggen(sigma):
"""
Helper function to generate derivatives of Gaussian kernels, in either 1D, 2D, or 3D.
Source code in MATLAB obtained from <NAME>, Stanford University, September 2015
:param sigma: Sigma for use (see defaults in generate_FSL_structure_tensor)
:return: Derivative of Gaussian kernel with dimensions of sigma.
"""
    halfsize = int(np.ceil(3 * np.max(sigma)));
    x = range(-halfsize, halfsize + 1);  # Python's range is not inclusive at the end, while MATLAB's colon is.
    dim = len(sigma);
if dim == 1:
X = np.array(x); # Remember that, by default, numpy arrays are elementwise multiplicative
X = X.astype(float);
        k = -X * np.exp(-X ** 2 / (2 * sigma[0] ** 2));
elif dim == 2:
[X, Y] = np.meshgrid(x, x);
X = X.astype(float);
Y = Y.astype(float);
        k = -X * np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2));
elif dim == 3:
[X, Y, Z] = np.meshgrid(x, x, x);
X = X.transpose(0, 2, 1); # Obtained through vigorous testing (see below...)
Y = Y.transpose(2, 0, 1);
Z = Z.transpose(2, 1, 0);
X = X.astype(float);
Y = Y.astype(float);
Z = Z.astype(float);
k = -X * np.exp(np.divide(-np.power(X, 2), 2 * np.power(sigma[0], 2))) * np.exp(
np.divide(-np.power(Y, 2), 2 * np.power(sigma[1], 2))) * np.exp(
np.divide(-np.power(Z, 2), 2 * np.power(sigma[2], 2)))
else:
        print('Only supports up to 3 dimensions')
return np.divide(k, np.sum(np.abs(k[:])));
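# Example (hypothetical sigma): doggen([1.0, 1.0, 1.0]) returns a 7x7x7
# derivative-of-Gaussian kernel (halfsize = ceil(3 * 1) = 3, so x runs over
# -3..3), normalized so the absolute values of its taps sum to one.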
def gaussgen(sigma):
"""
Function to generate Gaussian kernels, in 1D, 2D and 3D.
Source code in MATLAB obtained from <NAME>, Stanford University, September 2015
:param sigma: Sigma for use in generating Gaussian kernel (see defaults in generate_FSL_structure_tensor)
:return: Gaussian kernel with dimensions of sigma.
"""
    halfsize = int(np.ceil(3 * max(sigma)));
    x = range(-halfsize, halfsize + 1);
    dim = len(sigma);
if dim == 1:
        x = np.array(x, dtype=float);
        k = np.exp(-x ** 2 / (2 * sigma[0] ** 2));
elif dim == 2:
[X, Y] = np.meshgrid(x, x);
X = X.astype(float);
Y = Y.astype(float);
k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2));
elif dim == 3:
        [X, Y, Z] = np.meshgrid(x, x, x);
import matplotlib.pyplot as plt
import os
import numpy as np
from datetime import datetime
from matplotlib.backends.backend_pdf import PdfPages
from emma.io.traceset import TraceSet
from emma.utils.utils import MaxPlotsReached, EMMAException
#plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.get_cmap('flag').colors) # Use different cycling colors
#plt.style.use('bmh') # Use different style
def plt_save_pdf(path):
"""
Save plot as pdf to path
:param path:
:return:
"""
pp = PdfPages(path)
pp.savefig(dpi=300)
pp.close()
plt.clf()
plt.cla()
def plot_spectogram(trace_set,
sample_rate,
nfft=2**10,
noverlap=0,
cmap='plasma',
params=None,
num_traces=1024):
if not trace_set.windowed:
raise EMMAException("Trace set should be windowed")
# Check params
if params is not None:
if len(params) == 1:
nfft = int(params[0])
elif len(params) == 2:
nfft = int(params[0])
noverlap = int(nfft * int(params[1]) / 100.0)
all_signals = np.array([trace.signal for trace in trace_set.traces[0:num_traces]]).flatten()
"""
# Old style
for trace in trace_set.traces[0:num_traces]:
plt.specgram(trace.signal, NFFT=nfft, Fs=sample_rate, noverlap=noverlap, cmap=cmap)
"""
plt.specgram(all_signals, NFFT=nfft, Fs=sample_rate, noverlap=noverlap, cmap=cmap, mode='psd', scale='dB')
plt.tight_layout()
plt.show()
def plot_colormap(inputs,
show=True,
cmap='inferno',
draw_axis=True,
title='',
xlabel='',
ylabel='',
colorbar_label='',
save=False,
**kwargs):
"""
Plot signals given in the inputs numpy array in a colormap.
:param inputs:
:param show:
:param cmap:
:param draw_axis:
:param title:
:param cmap:
:param xlabel:
:param ylabel:
:param colorbar_label:
:param save:
:param kwargs:
:return:
"""
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if inputs.dtype == np.complex64 or inputs.dtype == np.complex128:
inputs = np.real(inputs)
print("Warning: converting colormap to np.real(complex)")
#inputs += 0.01
vmin = inputs.min()
vmax = inputs.max()
colorplot = plt.imshow(inputs,
vmin=vmin,
vmax=vmax,
interpolation='nearest',
# norm=LogNorm(vmin=vmin, vmax=vmax),
cmap=cmap,
**kwargs)
if draw_axis:
# https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
from mpl_toolkits.axes_grid1 import make_axes_locatable
axis = plt.gca()
figure = plt.gcf()
divider = make_axes_locatable(axis)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = figure.colorbar(colorplot, cax=cax)
cbar.set_label(colorbar_label)
plt.tight_layout()
if save:
if title:
plt_save_pdf('/tmp/%s.pdf' % title)
else:
plt_save_pdf('/tmp/%s.pdf' % str(datetime.now()))
if show:
plt.show()
def _get_x_axis_values(signal, time_domain=True, sample_rate=1.0):
if not time_domain:
freqs = np.fft.fftfreq(len(signal), d=1.0/sample_rate)
x = np.fft.fftshift(freqs)
else:
x = range(0, len(signal))
return x
def plot_trace_sets(reference_signal,
trace_sets,
params=None,
no_reference_plot=False,
num_traces=1024,
title='',
xlabel='',
ylabel='',
colorbar_label='',
time_domain=True,
sample_rate=1.0):
"""
Plot num_traces signals from a list of trace sets using matplotlib
"""
saveplot = False
colormap = False
# Check params
if params is not None:
if len(params) >= 1:
if 'save' in params:
saveplot = True
if '2d' in params:
colormap = True
if not isinstance(trace_sets, list) or isinstance(trace_sets, TraceSet):
raise ValueError("Expected list of TraceSets")
if len(trace_sets) == 0:
return
# Make title
common_path = os.path.commonprefix([trace_set.name for trace_set in trace_sets])
if title == '':
title = "%d trace sets from %s" % (len(trace_sets), common_path)
if reference_signal.dtype == np.complex64 or reference_signal.dtype == np.complex128:
title += " (complex, only real values plotted)"
# Make plots
count = 0
all_signals = []
try:
for trace_set in trace_sets:
for trace in trace_set.traces:
all_signals.append(trace.signal)
count += 1
if count >= num_traces:
raise MaxPlotsReached
except MaxPlotsReached:
pass
finally:
if xlabel == '':
if time_domain:
xlabel = 'Samples'
else:
xlabel = 'Frequency (assuming sample rate %.2f)' % sample_rate
if colormap:
plot_colormap(np.array(all_signals),
show=False,
title=title,
xlabel=xlabel,
ylabel=ylabel,
colorbar_label=colorbar_label)
else:
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
for signal in all_signals:
x = _get_x_axis_values(signal, sample_rate=sample_rate, time_domain=time_domain)
plt.plot(x, signal)
if not no_reference_plot:
x = _get_x_axis_values(reference_signal, sample_rate=sample_rate, time_domain=time_domain)
plt.plot(x, reference_signal, linewidth=2, linestyle='dashed')
if saveplot:
plt_save_pdf('/tmp/plotted_trace_sets.pdf')
plt.clf()
else:
plt.show()
def plot_correlations(values1, values2, label1="", label2="", show=False):
    values1 = np.reshape(values1, (-1,))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`fit`
==================
.. module:: fit
:synopsis:
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2015-09-24, 07:18:22
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from b2ac.compat import *
import b2ac.matrix.matrix_operations as mo
import b2ac.eigenmethods.qr_algorithm as qr
import b2ac.eigenmethods.inverse_iteration as inv_iter
def fit_improved_B2AC_double(points):
"""Ellipse fitting in Python with improved B2AC algorithm as described in
this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.
This version of the fitting uses float storage during calculations and performs the
eigensolver on a float array. It only uses `b2ac` package methods for fitting, to
be as similar to the integer implementation as possible.
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: The conic section array defining the fitted ellipse.
:rtype: :py:class:`numpy.ndarray`
"""
e_conds = []
points = np.array(points, 'float')
M, T = _calculate_M_and_T_double(points)
e_vals = sorted(qr.QR_algorithm_shift_Givens_double(M)[0])
a = None
for ev_ind in [1, 2, 0]:
# Find the eigenvector that matches this eigenvector.
eigenvector = inv_iter.inverse_iteration_for_eigenvector_double(M, e_vals[ev_ind], 5)
# See if that eigenvector yields an elliptical solution.
elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
e_conds.append(elliptical_condition)
if elliptical_condition > 0:
a = eigenvector
break
if a is None:
print("Eigenvalues = {0}".format(e_vals))
print("Elliptical conditions = {0}".format(e_conds))
raise ArithmeticError("No elliptical solution found.")
conic_coefficients = np.concatenate((a, np.dot(T, a)))
return conic_coefficients
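# The returned vector [a, b, c, d, e, f] defines the conic
# a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0; the ellipticity test above is the
# standard discriminant condition b^2 - 4*a*c < 0.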
def _calculate_M_and_T_double(points):
"""Part of the B2AC ellipse fitting algorithm, calculating the M and T
matrices needed.
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: Matrices M and T.
:rtype: tuple
"""
S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])
S1 = S[:3, :3]
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Widget providing a set of tools to draw masks on a PlotWidget.
This widget is meant to work with :class:`silx.gui.plot.PlotWidget`.
- :class:`Mask`: Handle mask bitmap update and history
- :class:`MaskToolsWidget`: GUI for :class:`Mask`
- :class:`MaskToolsDockWidget`: DockWidget to integrate in :class:`PlotWindow`
"""
from __future__ import division
__authors__ = ["<NAME>"]
__license__ = "MIT"
__data__ = "08/06/2016"
import os
import sys
import numpy
import logging
from silx.image import shapes
from .Colors import cursorColorForColormap, rgba
from .. import icons, qt
from silx.third_party.EdfFile import EdfFile
from silx.third_party.TiffIO import TiffIO
try:
import fabio
except ImportError:
fabio = None
_logger = logging.getLogger(__name__)
class Mask(qt.QObject):
"""A mask field with update operations.
Coords follows (row, column) convention and are in mask array coords.
This is meant for internal use by :class:`MaskToolsWidget`.
"""
sigChanged = qt.Signal()
"""Signal emitted when the mask has changed"""
sigUndoable = qt.Signal(bool)
"""Signal emitted when undo becomes possible/impossible"""
sigRedoable = qt.Signal(bool)
"""Signal emitted when redo becomes possible/impossible"""
def __init__(self):
self.historyDepth = 10
"""Maximum number of operation stored in history list for undo"""
self._mask = numpy.array((), dtype=numpy.uint8) # Store the mask
# Init lists for undo/redo
self._history = []
self._redo = []
super(Mask, self).__init__()
def _notify(self):
"""Notify of mask change."""
self.sigChanged.emit()
def getMask(self, copy=True):
"""Get the current mask as a 2D array.
:param bool copy: True (default) to get a copy of the mask.
If False, the returned array MUST not be modified.
:return: The array of the mask with dimension of the 'active' image.
If there is no active image, an empty array is returned.
:rtype: 2D numpy.ndarray of uint8
"""
return numpy.array(self._mask, copy=copy)
def setMask(self, mask, copy=True):
"""Set the mask to a new array.
:param numpy.ndarray mask: The array to use for the mask.
:type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.
Array of other types are converted.
:param bool copy: True (the default) to copy the array,
False to use it as is if possible.
"""
assert len(mask.shape) == 2
self._mask = numpy.array(mask, copy=copy, order='C', dtype=numpy.uint8)
self._notify()
def save(self, filename, kind):
"""Save current mask in a file
:param str filename: The file where to save to mask
:param str kind: The kind of file to save in 'edf', 'tif', 'npy',
or 'msk' (if FabIO is installed)
:raise Exception: Raised if the file writing fail
"""
if kind == 'edf':
edfFile = EdfFile(filename, access="w+")
edfFile.WriteImage({}, self.getMask(copy=False), Append=0)
elif kind == 'tif':
tiffFile = TiffIO(filename, mode='w')
tiffFile.writeImage(self.getMask(copy=False), software='silx')
elif kind == 'npy':
try:
numpy.save(filename, self.getMask(copy=False))
except IOError:
raise RuntimeError("Mask file can't be written")
elif kind == 'msk':
if fabio is None:
raise ImportError("Fit2d mask files can't be written: Fabio module is not available")
try:
data = self.getMask(copy=False)
image = fabio.fabioimage.FabioImage(data=data)
image = image.convert(fabio.fit2dmaskimage.Fit2dMaskImage)
image.save(filename)
except Exception:
_logger.debug("Backtrace", exc_info=True)
raise RuntimeError("Mask file can't be written")
else:
raise ValueError("Format '%s' is not supported" % kind)
# History control
def resetHistory(self):
"""Reset history"""
self._history = [numpy.array(self._mask, copy=True)]
self._redo = []
self.sigUndoable.emit(False)
self.sigRedoable.emit(False)
def commit(self):
"""Append the current mask to history if changed"""
if (not self._history or self._redo or
not numpy.all(numpy.equal(self._mask, self._history[-1]))):
if self._redo:
self._redo = [] # Reset redo as a new action as been performed
self.sigRedoable[bool].emit(False)
while len(self._history) >= self.historyDepth:
self._history.pop(0)
self._history.append(numpy.array(self._mask, copy=True))
if len(self._history) == 2:
self.sigUndoable.emit(True)
def undo(self):
"""Restore previous mask if any"""
if len(self._history) > 1:
self._redo.append(self._history.pop())
self._mask = numpy.array(self._history[-1], copy=True)
self._notify() # Do not store this change in history
if len(self._redo) == 1: # First redo
self.sigRedoable.emit(True)
if len(self._history) == 1: # Last value in history
self.sigUndoable.emit(False)
def redo(self):
"""Restore previously undone modification if any"""
if self._redo:
self._mask = self._redo.pop()
self._history.append(numpy.array(self._mask, copy=True))
self._notify()
if not self._redo: # No more redo
self.sigRedoable.emit(False)
if len(self._history) == 2: # Something to undo
self.sigUndoable.emit(True)
# Whole mask operations
def clear(self, level):
"""Set all values of the given mask level to 0.
:param int level: Value of the mask to set to 0.
"""
assert 0 < level < 256
self._mask[self._mask == level] = 0
self._notify()
def reset(self, shape=None):
"""Reset the mask to zero and change its shape
:param shape: Shape of the new mask or None to have an empty mask
:type shape: 2-tuple of int
"""
if shape is None:
shape = 0, 0 # Empty 2D array
assert len(shape) == 2
shapeChanged = (shape != self._mask.shape)
self._mask = numpy.zeros(shape, dtype=numpy.uint8)
if shapeChanged:
self.resetHistory()
self._notify()
def invert(self, level):
"""Invert mask of the given mask level.
0 values become level and level values become 0.
:param int level: The level to invert.
"""
assert 0 < level < 256
masked = self._mask == level
self._mask[self._mask == 0] = level
self._mask[masked] = 0
self._notify()
# Drawing operations
def updateRectangle(self, level, row, col, height, width, mask=True):
"""Mask/Unmask a rectangle of the given mask level.
:param int level: Mask level to update.
:param int row: Starting row of the rectangle
:param int col: Starting column of the rectangle
:param int height:
:param int width:
:param bool mask: True to mask (default), False to unmask.
"""
assert 0 < level < 256
selection = self._mask[max(0, row):row + height + 1,
max(0, col):col + width + 1]
if mask:
selection[:, :] = level
else:
selection[selection == level] = 0
self._notify()
def updatePolygon(self, level, vertices, mask=True):
"""Mask/Unmask a polygon of the given mask level.
:param int level: Mask level to update.
:param vertices: Nx2 array of polygon corners as (row, col)
:param bool mask: True to mask (default), False to unmask.
"""
fill = shapes.polygon_fill_mask(vertices, self._mask.shape)
if mask:
self._mask[fill != 0] = level
else:
self._mask[numpy.logical_and(fill != 0,
self._mask == level)] = 0
self._notify()
def updatePoints(self, level, rows, cols, mask=True):
"""Mask/Unmask points with given coordinates.
:param int level: Mask level to update.
:param rows: Rows of selected points
:type rows: 1D numpy.ndarray
:param cols: Columns of selected points
:type cols: 1D numpy.ndarray
:param bool mask: True to mask (default), False to unmask.
"""
valid = numpy.logical_and(
numpy.logical_and(rows >= 0, cols >= 0),
numpy.logical_and(rows < self._mask.shape[0],
cols < self._mask.shape[1]))
rows, cols = rows[valid], cols[valid]
if mask:
self._mask[rows, cols] = level
else:
inMask = self._mask[rows, cols] == level
self._mask[rows[inMask], cols[inMask]] = 0
self._notify()
def updateStencil(self, level, stencil, mask=True):
"""Mask/Unmask area from boolean mask.
:param int level: Mask level to update.
:param stencil: Boolean mask of mask values to update
:type stencil: numpy.array of same dimension as the mask
:param bool mask: True to mask (default), False to unmask.
"""
rows, cols = numpy.nonzero(stencil)
self.updatePoints(level, rows, cols, mask)
def updateDisk(self, level, crow, ccol, radius, mask=True):
"""Mask/Unmask a disk of the given mask level.
:param int level: Mask level to update.
:param int crow: Disk center row.
:param int ccol: Disk center column.
:param float radius: Radius of the disk in mask array unit
:param bool mask: True to mask (default), False to unmask.
"""
rows, cols = shapes.circle_fill(crow, ccol, radius)
self.updatePoints(level, rows, cols, mask)
def updateLine(self, level, row0, col0, row1, col1, width, mask=True):
"""Mask/Unmask a line of the given mask level.
:param int level: Mask level to update.
:param int row0: Row of the starting point.
:param int col0: Column of the starting point.
:param int row1: Row of the end point.
:param int col1: Column of the end point.
:param int width: Width of the line in mask array unit.
:param bool mask: True to mask (default), False to unmask.
"""
rows, cols = shapes.draw_line(row0, col0, row1, col1, width)
self.updatePoints(level, rows, cols, mask)
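# Typical drawing flow (sketch): reset the mask to the image shape, draw, then
# commit so the change enters the undo history, e.g.
#   mask = Mask()
#   mask.reset((512, 512))
#   mask.updateDisk(level=1, crow=256, ccol=256, radius=32.0)
#   mask.commit()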
class MaskToolsWidget(qt.QWidget):
"""Widget with tools for drawing mask on an image in a PlotWidget."""
_maxLevelNumber = 255
def __init__(self, parent=None, plot=None):
# register if the user as force a color for the corresponding mask level
self._defaultColors = numpy.ones((self._maxLevelNumber + 1), dtype=numpy.bool)
# overlays colors set by the user
self._overlayColors = numpy.zeros((self._maxLevelNumber + 1, 3), dtype=numpy.float32)
self._plot = plot
self._maskName = '__MASK_TOOLS_%d' % id(self) # Legend of the mask
self._colormap = {
'name': None,
'normalization': 'linear',
'autoscale': False,
'vmin': 0, 'vmax': self._maxLevelNumber,
'colors': None}
self._defaultOverlayColor = rgba('gray') # Color of the mask
self._setMaskColors(1, 0.5)
self._origin = (0., 0.) # Mask origin in plot
self._scale = (1., 1.) # Mask scale in plot
self._z = 1 # Mask layer in plot
self._data = numpy.zeros((0, 0), dtype=numpy.uint8) # Store image
self._mask = Mask()
self._mask.sigChanged.connect(self._updatePlotMask)
self._drawingMode = None # Store current drawing mode
self._lastPencilPos = None
self._multipleMasks = 'exclusive'
super(MaskToolsWidget, self).__init__(parent)
self._initWidgets()
self._maskFileDir = qt.QDir.home().absolutePath()
self.plot.sigInteractiveModeChanged.connect(
self._interactiveModeChanged)
def getSelectionMask(self, copy=True):
"""Get the current mask as a 2D array.
:param bool copy: True (default) to get a copy of the mask.
If False, the returned array MUST not be modified.
:return: The array of the mask with dimension of the 'active' image.
If there is no active image, an empty array is returned.
:rtype: 2D numpy.ndarray of uint8
"""
return self._mask.getMask(copy=copy)
def setSelectionMask(self, mask, copy=True):
"""Set the mask to a new array.
:param numpy.ndarray mask: The array to use for the mask.
:type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.
Array of other types are converted.
:param bool copy: True (the default) to copy the array,
False to use it as is if possible.
:return: None if failed, shape of mask as 2-tuple if successful.
The mask can be cropped or padded to fit active image,
the returned shape is that of the active image.
"""
        mask = numpy.array(mask, copy=False, dtype=numpy.uint8)
import numpy as np
import os
from nltk import ngrams
from pandas.core.frame import DataFrame
import os
import time
import random
import pickle
import math
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from tensorflow import keras
from collections import Counter
from collections import defaultdict
#Set the working directory to where data is loaded from and saved to.
os.chdir("/home/ubuntu/mika/next_event_prediction/data")
#Global variables
_ngrams_ = 5
_start_ ="SoS" #Start of Sequence used in padding the sequence
_end_ = "EoS" #End of Sequence used in padding the sequence
#More global state for the n-gram model
n_gram_counter = Counter()
n_gram_counter_1 = Counter()
n1_gram_dict = defaultdict() # to keep mappings of possible following events e1 e2 -> e1 e2 e3, e1 e2 e4,
n1_gram_winner = dict() #What is the event n following n-1 gram, i.e. the prediction ?
def create_ngram_model(train_data):
global n_gram_counter, n_gram_counter_1
ngrams = list()
ngrams_1 = list()
for seq in train_data:
seqs, seqs_1 = slice_to_ngrams(seq)
ngrams.extend(seqs)
ngrams_1.extend(seqs_1)
n_gram_counter += Counter (ngrams)
n_gram_counter_1 += Counter (ngrams_1)
for idx, s in enumerate(ngrams):
#dictionary for faster access from n-1 grams to n-grams, e.g. from [e1 e2 e3] -> [e1 e2 e3 e4]; [e1 e2 e3] -> [e1 e2 e3 e5] etc...
n1_gram_dict.setdefault(ngrams_1[idx],[]).append(s)
#precompute the most likely sequence following n-1gram. Needed to keep prediction times fast
if (ngrams_1[idx] in n1_gram_winner): #is there existing winner
n_gram = n1_gram_winner[ngrams_1[idx]]
if (n_gram_counter[n_gram] < n_gram_counter[s]): #there is but we are bigger replace
n1_gram_winner[ngrams_1[idx]] = s
else:
n1_gram_winner[ngrams_1[idx]] = s #no n-1-gram key or winner add a new one...
#Produce required n-grams. E.g. With sequence [e1 ... e5] and _ngrams_=3 we produce [e1 e2 e3], [e2 e3 e4], and [e3 e4 e5]
def slice_to_ngrams (seq):
#Add SoS and EoS
#with n-gram 3 it is SoS SoS E1 E2 E3 EoS
#No need to pad more than one EoS as the final event to be predicted is EoS
seq = [_start_]*(_ngrams_-1) +seq+[_end_]
ngrams = list()
ngrams_1 = list()
for i in range(_ngrams_, len(seq)+1):#len +1 because [0:i] leaves out the last element
ngram_s = seq[i-_ngrams_:i]
# convert into a line
line = ' '.join(ngram_s)
# store
ngrams.append(line)
ngram_s_1= seq[i-_ngrams_:i-1]
line2 = ' '.join(ngram_s_1)
# store
ngrams_1.append(line2)
return ngrams, ngrams_1
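# Example (with _ngrams_ = 3): slice_to_ngrams(["e1", "e2", "e3"]) pads the
# sequence to [SoS, SoS, e1, e2, e3, EoS] and returns the n-grams
# ["SoS SoS e1", "SoS e1 e2", "e1 e2 e3", "e2 e3 EoS"] together with the
# n-1-grams ["SoS SoS", "SoS e1", "e1 e2", "e2 e3"].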
# Return two anomaly scores as in the paper
# Ano score per line (i.e. given the previous lines how probable is this line).
# And n of occurences per line seen in the past
def give_ano_score (seq):
seq_shingle, seq_shingle_1 = slice_to_ngrams(seq)
scores = list()
for s in seq_shingle:
scores.append(n_gram_counter [s])
scores_1 = list()
for s in seq_shingle_1:
scores_1.append(n_gram_counter_1 [s])
#Remove 0s from n1 gram list to get rid of division by zero.
    # If an n-1 gram count is zero, the following n-gram count must be zero as well, so this does not affect the results
scores_1 = [1 if i ==0 else i for i in scores_1]
#Convert n-gram freq counts to probs of n-gram given n-gram-minus-1
scores_prop = np.divide(np.array(scores), np.array(scores_1))
scores_abs = np.array(scores)
return (scores_prop, scores_abs)
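# Interpretation sketch: scores_prop[i] estimates P(line_i | previous n-1
# lines), so values near 0 flag lines that rarely follow their context, while
# scores_abs[i] is the raw training-set count of the full n-gram (0 = never
# seen in this context).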
def load_pro_data():
pro_x = np.load("profilence_x_data.npy", allow_pickle=True)
pro_y = np.load("profilence_y_data.npy", allow_pickle=True)
pro_y = pro_y == 1
abnormal_test = pro_x[pro_y]
pro_x_normal = pro_x[~pro_y]
from nltk import ngrams
lengths = list()
for seq in pro_x_normal:
lengths.append(len(seq))
#zeros = np.array([True if i ==0 else False for i in lengths])
#pro_x_normal = pro_x_normal[~zeros]
#Remove the short logs less than 10000
ten_k_lenght = np.array([True if i >= 10000 else False for i in lengths])
pro_x_normal = pro_x_normal[ten_k_lenght]
normal_data = pro_x_normal
return normal_data, abnormal_test
def load_hdfs_data():
hdfs_x = np.load("hdfs_x_data.npy", allow_pickle=True)
hdfs_y = np.load("hdfs_y_data.npy", allow_pickle=True)
hdfs_y = hdfs_y == 1
hdfs_x_normal = hdfs_x[~hdfs_y]
abnormal_test = hdfs_x[hdfs_y]
normal_data = hdfs_x_normal
return normal_data, abnormal_test
#Reset global n-gram variables. Used when creating multiple n-gram models
def reset_globals():
global n_gram_counter, n_gram_counter_1, n1_gram_dict, n1_gram_winner
n_gram_counter = Counter()
n_gram_counter_1 = Counter()
from collections import defaultdict
n1_gram_dict = defaultdict() # to keep mappings of possible following events e1 e2 -> e1 e2 e3, e1 e2 e4,
n1_gram_winner = dict()
#sequences = list()
#sequences_1 = list()
def create_LSTM_model(ngrams, vocab_size, share_of_data=1):
#If we want to use less than 100% of data select samples. I am not sure this is ever used
if (share_of_data < 1):
select = int(len(ngrams) * share_of_data)
ngrams = random.sample(ngrams, select)
random.shuffle(ngrams)
    # How many dimensions will be used to represent each event.
    # With words one would use higher values here, e.g. 200-400.
    # Higher values did not improve accuracy but did hurt performance. Even 50 might be too much
dimensions_to_represent_event = 50
model = Sequential()
model.add(Embedding(vocab_size, dimensions_to_represent_event, input_length=_ngrams_-1))
    # We use two LSTM hidden layers with 100 memory cells each.
    # More memory cells and a deeper network may achieve better results.
model.add(LSTM(100, return_sequences=True))
model.add(LSTM(100))
model.add(Dense(100, activation='relu'))
model.add(Dense(vocab_size, activation='softmax'))
print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    #Loop needed as the office PC would crash in to_categorical with the Profilence data set as it ran out of memory.
#TODO: Do we need a loop when using CSC HW?
loop_variable = 50000
for x in range(0, len(ngrams), loop_variable):
print(f'loop with x= {x}. / {len(ngrams)}')
ngrams0 = np.array(ngrams[x:x+loop_variable])
X, y = ngrams0[:,:-1], ngrams0[:,-1]
y = to_categorical(y, num_classes=vocab_size)
#Modify batch_size and epoch to influence the training time and resulting accuracy.
history = model.fit(X, y, validation_split=0.05, batch_size=1024, epochs=10, shuffle=True).history
return model
# We need to change events e1 e2 e3 to numbers for the DL model so they are mapped here, e.g. e1 -> 137, e2 -> 342
def sequences_to_dl_ngrams (train_data):
ngrams = list() #ngrams= []
for seq in train_data:
t_ngrams, t_ngrams_1 = slice_to_ngrams(seq)
ngrams.extend(t_ngrams)
tokenizer = Tokenizer(oov_token=1)
tokenizer.fit_on_texts(ngrams)
ngrams_num = tokenizer.texts_to_sequences(ngrams)
vocab_size = len(tokenizer.word_index) + 1
return ngrams, ngrams_num, vocab_size, tokenizer
#Gives N-gram predictions
def give_preds (seq):
seq_shingle, seq_shingle_1 = slice_to_ngrams(seq)
# print(seq_shingle)
correct_preds = list()
for s in seq_shingle:
to_be_matched_s = s.rpartition(' ')[0]
#print("to be matched " + to_be_matched_s)
if (to_be_matched_s in n1_gram_dict):
winner = n1_gram_winner[to_be_matched_s]
if (winner == s):
correct_preds.append(1)
#print("correct")
else:
correct_preds.append(0)
#print("incorrec predic")
else:
correct_preds.append(0)
#print("no key")
return correct_preds
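# Illustrative usage (assumes create_ngram_model has already populated the
# global n-gram tables; the sequence is hypothetical):
#   accuracy = np.mean(give_preds(['e1', 'e2', 'e3', 'e4']))
# i.e. the fraction of next-event predictions the n-gram model got right.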
#LSTM prediction per sequence. Typically called from a loop, which is not efficient for HDFS
def give_preds_lstm (seq):
seq_shingle, seq_shingle_1 = slice_to_ngrams(seq)
seq_shingle_num = lstm_tokenizer.texts_to_sequences(seq_shingle)
seq_shingle_num_np = np.array(seq_shingle_num)
seq_shingle_num_1 = seq_shingle_num_np[:,:-1]
seq_shingle_truth = seq_shingle_num_np[:,-1]
#predicted_sec = model.predict(seq_shingle_num_1)
predicted_sec = model.predict(seq_shingle_num_1,verbose=1, batch_size=4096)
predicted_events = np.argmax(predicted_sec, axis=1)
correct_preds = seq_shingle_truth == predicted_events
return correct_preds
#LSTM predictions with multiple sequences packed in numpy array
def give_preds_lstm_2 (sequences, b_size=4096):
seq_shingle = list()
#check if this is an array of sequences
start_s = time.time()
if (isinstance(sequences, np.ndarray)):
for s in sequences:
temp_seq_shingle, temp_seq_shingle_1 = slice_to_ngrams(s)
seq_shingle.extend(temp_seq_shingle)
    else: #if not a numpy array, treat the input as a single sequence
seq_shingle, seq_shingle_1 = slice_to_ngrams(sequences)
end_s = time.time()
print("Shingle creation took", end_s - start_s)
start_s = time.time()
seq_shingle_num = lstm_tokenizer.texts_to_sequences(seq_shingle) #do this before slice to n-grams
end_s = time.time()
print("lstm_tokenizer took", end_s - start_s)
seq_shingle_num_np = np.array(seq_shingle_num)
seq_shingle_num_1 = seq_shingle_num_np[:,:-1]
seq_shingle_truth = seq_shingle_num_np[:,-1]
#predicted_sec = model.predict(seq_shingle_num_1)
start_s = time.time()
predicted_sec = model.predict(seq_shingle_num_1,verbose=1, batch_size=b_size)
end_s = time.time()
print("prediction took", end_s - start_s)
#predicted_sec = model.predict(seq_shingle_num_1, verbose=1, use_multiprocessing = True, max_queue_size=100,workers=4)
predicted_events = np.argmax(predicted_sec, axis=1)
correct_preds = seq_shingle_truth == predicted_events
return correct_preds
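# Illustrative usage (assumes `model` and `lstm_tokenizer` are in scope, as set
# up in the RQ1 section below):
#   acc = np.mean(give_preds_lstm_2(normal_test))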
# END of Functions-------------------------------------------------------------------------------------------------------------------
# What follows should be executed line-by-line
#RQ0 Demo case of metrics in paper shown in the final table-------------------------------------------------------------------------------------------
normal_data, abnormal_test = load_hdfs_data()
_ngrams_=5
create_ngram_model(normal_data)
ab_failure = list( abnormal_test[2] ) #1st fail is FFWH 2nd is WF 3rd is the first long
ano_score = give_ano_score (ab_failure)
for i in range(len(ab_failure)):
print(ab_failure[i]," ", ano_score[1][i], " ", ano_score[0][i])
if (i+1 == len(ab_failure)):
print("EoS ", ano_score[1][i], " ", ano_score[0][i])
print (ano_score[1])
np.average(ano_score[0])
np.percentile(ano_score[1],5)
len(ano_score[0])
#RQ0 Some basic stats for the paper e.g. number of n-grams in data---------------------------------------------------
normal_data, abnormal_test = load_pro_data()
normal_data, abnormal_test = load_hdfs_data()
_ngrams_=1
ngrams = list()
for seq in normal_data:
seqs, seqs_1 = slice_to_ngrams(seq)
ngrams.extend(seqs)
ngrams = np.array(ngrams)
win_unique, win_counts = np.unique(ngrams, return_counts=True)
win_counts[np.argmax(win_counts)]
for i in range(10):
_ngrams_ = i+1
start_s = time.time()
ngrams = list()
for seq in normal_data:
seqs, seqs_1 = slice_to_ngrams(seq)
ngrams.extend(seqs)
win_unique, win_counts = np.unique(ngrams, return_counts=True)
end_s = time.time()
print ("N-grams: ",_ngrams_," Unique:", len(win_unique), "Done in:", end_s-start_s)
# RQ1---------------------------------------------------------------------------------------------------
# Data loading
#Select which data set to load (execute only the assignment you want)
data="hdfs"
data="pro"
if (data=="hdfs"):
print("Setting data to HDFS")
normal_train = np.loadtxt('split_normal_hdfs_train.txt') #load split
normal_data, abnormal_test = load_hdfs_data() #load data
elif(data=="pro"):
print("Setting data to PRO")
normal_train = np.loadtxt('split_normal_pro_train.txt') #load split
    normal_data, abnormal_test = load_pro_data() #load data
normal_train = np.array(normal_train, dtype=bool)
normal_test = normal_data[~normal_train]
#Creating split. Uncomment if new split needed. Currently we just load the pre-saved split
#train_i = np.random.choice(normal_data.shape[0], np.floor_divide(normal_data.shape[0],2), replace=False)
#normal_train = np.isin(range(normal_data.shape[0]), train_i)
#save data
#np.savetxt('split_normal_pro_train.txt', normal_train, fmt='%d') #PRO
#np.savetxt('split_normal_hdfs_train.txt', normal_train, fmt='%d') #HDFS
#---Create models
#ngram---------------------------------------------------------
_ngrams_ = 5
#ngram model
start_s = time.time()
create_ngram_model(normal_data[normal_train])
end_s = time.time()
print("ngram with ngrams:", _ngrams_, "done in", end_s - start_s)
#lstm model-load/creation---------------------------------------------
create_model = "yes"
create_model = "no"
if (create_model=="yes"):
start_s = time.time()
lstm_ngrams, lstm_ngrams_num, lstm_vocab_size, lstm_tokenizer = sequences_to_dl_ngrams(normal_data[normal_train])
model = create_LSTM_model(lstm_ngrams_num, lstm_vocab_size, share_of_data=1)
end_s = time.time()
print("lstm with ngrams:", _ngrams_, "done in", end_s - start_s)
if (data=="hdfs"):
#load save model
#model.save("ngram5_lstm_hdfs_50_normal_all_data_20_11_2021")
#model.save("ngram5_lstm_hdfs_50_normal_all_data_14_01_2022")
model.save("ngram5_lstm_hdfs_50_normal_all_data_CURRENT_DATE")
# saving tokenizer
with open('tokenizer_5_lstm_hdfs_50__CURRENT_DATE.pickle', 'wb') as handle:
pickle.dump(lstm_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
elif(data=="pro"):
#Model save / load
#model.save("ngram5_lstm_pro_50_normal_all_data_14_01_22")
model = keras.models.load_model("ngram5_lstm_pro_50_normal_all_data_CURRENT_DATE")
# saving tokenizer
with open('tokenizer_5_lstm_pro_50_CURRENT_DATE.pickle', 'wb') as handle:
pickle.dump(lstm_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
elif(create_model=="no"):
if (data=="hdfs"):
model = keras.models.load_model("ngram5_lstm_hdfs_50_normal_all_data_14_01_2022")
with open('tokenizer_5_lstm_hdfs_50_14_01_22.pickle', 'rb') as handle:
lstm_tokenizer = pickle.load(handle)
elif(data=="pro"):
model = keras.models.load_model("ngram5_lstm_pro_50_normal_all_data_14_01_22")
with open('tokenizer_5_lstm_pro_50_14_01_22.pickle', 'rb') as handle:
lstm_tokenizer = pickle.load(handle)
# LSTM Prediction------------------------------------------------------------------
#LSTM much faster with HDFS as one predict call instead of loop
start_s = time.time()
lstm_preds_all = list()
if (data=="hdfs"):
lstm_preds_all = give_preds_lstm_2(normal_test)
elif (data=="pro"):#Cannot do all pro data in one pass runs out of memory at 15gigs. Split to five calls
for i in range(int(len(normal_test)/10)):
lstm_preds_t = give_preds_lstm_2(normal_test[i:i+10])
lstm_preds_all.extend(lstm_preds_t)
end_s = time.time()
print("prediction time lstm with ngrams:", _ngrams_, "done in", end_s - start_s)
np.mean(lstm_preds_all)
#LSTM with loop. Warning SLOW for HDFS!
start_s = time.time()
print("len is", len(normal_test))
progress = math.floor(len(normal_test)/10)
lstm_preds = list()
for i in range(len(normal_test)):
    if (i % progress == 0): #as it is slow, print a progress line every 10%
print ("loop is at:",i,"/",len(normal_test))
preds_2 = give_preds_lstm(normal_test[i])
lstm_preds.append(preds_2)
end_s = time.time()
print("prediction time lstm with ngrams:", _ngrams_, "done in", end_s - start_s)
#---------------------------------------------------
#Studying results of lstm prediction
lstm_preds_means = list()
for preds in lstm_preds:
lstm_mean = np.mean(preds)
lstm_preds_means.append(lstm_mean)
#print (np.mean(lstm_mean))
print("Mean of means", np.mean(lstm_preds_means))
#ngram prediction-------------------------------------------
#ngram test with loop
ngram_preds = list()
ngram_preds2 = list()
start_s = time.time()
for normal_s in normal_test:
preds = give_preds(normal_s)
ngram_preds.append(preds)
ngram_preds2.extend(preds)
#print(".")
end_s = time.time()
print("prediction time ngram with ngrams:", _ngrams_, "done in", end_s - start_s)
#ngram investigate
ngram_preds_means = list()
for preds in ngram_preds:
ngram_mean = np.mean(preds)
ngram_preds_means.append(ngram_mean)
#print (np.mean(lstm_mean))
print("Mean of means", np.mean(ngram_preds_means))
np.mean(ngram_preds2)
save_string = "Loop_LSTM_"+"22022022_"+data+"_"+str(3)
#model.save(save_string)
model = keras.models.load_model(save_string)
# saving tokenizer
#with open(save_string+"_tokenizer.pickle", 'wb') as handle:
# pickle.dump(lstm_tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
#load tokenizer
with open(save_string+"_tokenizer.pickle", 'rb') as handle:
lstm_tokenizer = pickle.load(handle)
#Joint prediction; again, on CSC some crashes occurred---------------------------------------------
lstm_preds = list()
ngram_preds = list()
for normal_s in normal_test:
preds = give_preds(normal_s)
ngram_preds.append(preds)
preds_2 = give_preds_lstm(normal_s)
lstm_preds.append(preds_2)
print("Ngram accuracy:",np.mean(preds), "LSTM accuracy", np.mean(preds_2))
#save and load predictions
# with open("lstm_hdfs_preds.txt", "wb") as fp: #Pickling
# pickle.dump(lstm_preds, fp)
# with open("ngram_hdfs_preds.txt", "wb") as fp: #Pickling
# pickle.dump(ngram_preds, fp)
with open("lstm_hdfs_preds.txt", "rb") as fp: # Unpickling
lstm_preds = pickle.load(fp)
with open("ngram_hdfs_preds.txt", "rb") as fp: # Unpickling
ngram_preds = pickle.load(fp)
#investigate predictions-both------------------------
#here we can also do a sequence-by-sequence investigation: compute wins, ties, losses
lstm_sum= 0
tie_sum = 0
ngram_sum = 0
lstm_preds_means = list()
ngram_preds_means = list()
for i in range(len(lstm_preds)):
lstm_mean = np.mean(lstm_preds[i])
ngram_mean = np.mean(ngram_preds[i])
lstm_preds_means.append(lstm_mean)
ngram_preds_means.append(ngram_mean)
if (math.isclose(lstm_mean, ngram_mean, rel_tol=1e-4)):
#if ( lstm_mean == ngram_mean):
tie_sum = tie_sum +1
elif (lstm_mean> ngram_mean):
lstm_sum = lstm_sum +1
else:
ngram_sum = ngram_sum +1
np.mean(lstm_preds_means)
"""PSF module
This module provides a class implementing a spatially varying PSF.
Intended usage is:
>>> unknown ***
"""
import pdb
import numpy
def shift(im, offset, **kw):
"""Wrapper for scipy.ndimage.interpolation.shift"""
from scipy.ndimage.interpolation import shift
if 'order' not in kw:
kw['order'] = 4
# 1" Gaussian: 60 umag; 0.75": 0.4 mmag; 0.5": 4 mmag
# order=3 roughly 5x worse.
if 'mode' not in kw:
kw['mode'] = 'nearest'
if 'output' not in kw:
kw['output'] = im.dtype
return shift(im, offset, **kw)
def central_stamp(stamp, censize=19):
if censize is None:
censize = 19
stampsz = stamp.shape[-1]
if ((stampsz % 2) == 0) | ((censize % 2) == 0):
pdb.set_trace()
if stampsz == censize:
return stamp
elif stampsz > censize:
trim = (stamp.shape[-1] - censize)//2
f = trim
l = stampsz - trim
return stamp[..., f:l, f:l]
else:
ret = numpy.zeros(stamp.shape[:-2]+(censize, censize), dtype='f4')
central_stamp(ret, censize=stampsz)[..., :, :] = stamp
return ret
def neff_fwhm(stamp):
"""FWHM-like quantity derived from N_eff = numpy.sum(PSF**2.)**-1"""
norm = numpy.sum(stamp, axis=(-1, -2), keepdims=True)
return 1.18 * (numpy.pi*numpy.sum((stamp/norm)**2., axis=(-1, -2)))**(-0.5)
def fwhm_neff(fwhm):
return (fwhm/1.18)**2*numpy.pi
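# Consistency note: fwhm_neff inverts neff_fwhm's scaling, N_eff = pi*(fwhm/1.18)**2,
# so e.g. a 4-pixel FWHM corresponds to N_eff ~ 36.1 pixels.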
def gaussian_psf(fwhm, stampsz=19, deriv=True, shift=[0, 0]):
"""Create Gaussian psf & derivatives for a given fwhm and stamp size.
Args:
fwhm (float): the full width at half maximum
stampsz (int): the return psf stamps are [stampsz, stampsz] in size
deriv (bool): return derivatives?
shift (float, float): shift centroid by this amount in x, y
Returns:
(psf, dpsfdx, dpsfdy)
psf (ndarray[stampsz, stampsz]): the psf stamp
dpsfdx (ndarray[stampsz, stampsz]): the x-derivative of the PSF
dpsfdy (ndarray[stampsz, stampsz]): the y-derivative of the PSF
"""
sigma = fwhm / numpy.sqrt(8*numpy.log(2))
stampszo2 = stampsz // 2
parshape = numpy.broadcast(fwhm, shift[0], shift[1]).shape
    if len(parshape) > 0:
        shift = list(shift)  # avoid mutating the caller's (or the default) list, as moffat_psf does below
        sigma, shift[0], shift[1] = (numpy.atleast_1d(q).reshape(-1, 1, 1)
                                     for q in (sigma, shift[0], shift[1]))
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
yc = xc.copy()
xc = xc.reshape(-1, 1)-shift[0]
yc = yc.reshape(1, -1)-shift[1]
psf = numpy.exp(-(xc**2. + yc**2.) /
2./sigma**2.).astype('f4')
psf /= numpy.sum(psf[..., :, :])
dpsfdx = xc/sigma**2.*psf
dpsfdy = yc/sigma**2.*psf
ret = psf
if deriv:
ret = (ret,) + (dpsfdx, dpsfdy)
return ret
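# Minimal sanity check (illustrative values):
#   psf, dpsfdx, dpsfdy = gaussian_psf(4.0, stampsz=19)
#   numpy.sum(psf)        # ~1.0: the stamp is normalized by construction
#   simple_centroid(psf)  # ~(0, 0) when shift=[0, 0] (defined below)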
def moffat_psf(fwhm, beta=3., xy=0., yy=1., stampsz=19, deriv=True,
shift=[0, 0]):
"""Create Moffat psf & derivatives for a given fwhm and stamp size.
Args:
fwhm (float): the full width at half maximum
beta (float): beta parameter for Moffat distribution
xy (float): xy coefficient (0 for uncorrelated)
yy (float): yy coefficient (1 for FWHM_x == FWHM_y)
stampsz (int): the returned psf stamps are [stampsz, stampsz] in size
deriv (bool): return derivatives?
shift (float, float): shift centroid by this amount in x, y
Returns:
(psf, dpsfdx, dpsfdy)
psf (ndarray[stampsz, stampsz]): the psf stamp
dpsfdx (ndarray[stampsz, stampsz]): the x-derivative of the PSF
dpsfdy (ndarray[stampsz, stampsz]): the y-derivative of the PSF
"""
if numpy.any(beta <= 1e-3):
print('Warning: crazy values for beta in moffat_psf')
beta = numpy.clip(beta, 1e-3, numpy.inf)
alpha = fwhm/(2*numpy.sqrt(2**(1./beta)-1))
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
parshape = numpy.broadcast(fwhm, beta, xy, yy, shift[0], shift[1]).shape
xc = xc.reshape(-1, 1)
yc = xc.copy().reshape(1, -1)
if len(parshape) > 0:
alpha, beta, xy, yy = (numpy.atleast_1d(q).reshape(-1, 1, 1)
for q in (alpha, beta, xy, yy))
shift = list(shift)
shift[0], shift[1] = (numpy.atleast_1d(q).reshape(-1, 1, 1)
for q in (shift[0], shift[1]))
xc = xc - shift[0]
yc = yc - shift[1]
yy = numpy.abs(yy)
rc2 = yy**(-0.5)*xc**2. + xy*xc*yc + yy**(0.5)*yc**2.
# for bad xy, this can screw up and generate negative values.
if numpy.any(rc2 < 0.):
print('Warning: crazy xy and yy values to moffat_psf')
rc2 = numpy.clip(rc2, 0., numpy.inf)
rc = numpy.sqrt(rc2)
psf = (beta - 1)/(numpy.pi * alpha**2.)*(1.+(rc**2./alpha**2.))**(-beta)
ret = psf
if deriv:
dpsffac = (beta-1)/(numpy.pi*alpha**2.)*(beta)*(
(1+(rc**2./alpha**2.))**(-beta-1))
dpsfdx = dpsffac*2*xc/alpha
dpsfdy = dpsffac*2*yc/alpha
ret = (psf, dpsfdx, dpsfdy)
return ret
def simple_centroid(psf, norm=True):
stampsz = psf.shape[-1]
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
xc = xc.reshape(-1, 1)
yc = xc.copy().reshape(1, -1)
denom = 1.
if norm:
denom = numpy.sum(psf, axis=(-1, -2))
return (numpy.sum(xc*psf, axis=(-1, -2))/denom,
numpy.sum(yc*psf, axis=(-1, -2))/denom)
def center_psf(psf, censize=None):
"""Center and normalize a psf; centroid is placed at center."""
psf = psf.copy()
cpsf = central_stamp(psf, censize=censize)
for _ in range(3):
xcen, ycen = simple_centroid(cpsf)
psf[:, :] = shift(psf, [-xcen, -ycen],
output=numpy.dtype('f4'))
psf /= numpy.sum(psf)
psf = psf.astype('f4')
return psf
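# Illustrative pipeline (values hypothetical): build a stamp, then center it so
# downstream fitting can assume the centroid sits on the central pixel:
#   raw = moffat_psf(4.0, stampsz=19, deriv=False)
#   psf = center_psf(raw)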
class SimplePSF:
def __init__(self, stamp, normalize=19):
self.stamp = stamp
if normalize > 0:
norm = numpy.sum(central_stamp(stamp, censize=normalize))
self.stamp /= norm
        self.deriv = numpy.gradient(-stamp)
import numpy as np
try:
import numba
nopython_wrapper = numba.njit
except ImportError:
nopython_wrapper = lambda x: x
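# Example: a function decorated with `nopython_wrapper` is JIT-compiled when
# numba is installed and runs as plain Python otherwise:
#   @nopython_wrapper
#   def add(a, b):
#       return a + b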
def to_array_if_not(x):
"""Convert a value to a numpy array if it isn't already one."""
    return np.array(x)
#!/usr/bin/env python
import numpy as np
import os.path
import cStringIO
import string
from basicio import utils
import os, sys
_here = os.path.dirname(os.path.realpath(__file__))
__all__ = ['file2recarray', 'strarray2recarray', 'file2strarray', 'getheaders', 'arraydtypes']
def file2strarray(file, buffer=False, delimitter='', datastring=None,
ignorestring=None):
"""
load table-like data having consistent columns in a file or string into a
numpy array of strings
Parameters
----------
file: string, mandatory
absolute path to file containing the data, or a string containing the
data (with rows separated by new line characters). If file is not the
path to a file, then buffer must be true
buffer: optional, bool, defaults to False
If file is a string rather than the path to a file, this must be true
delimitter: string, optional, defaults to ''
type of delimitter used in the file
datastring: string, optional, defaults to `None`
if not none, assume that all lines containing data are prepended by
this string; therefore select only such lines, and strip this character
off.
ignorestring: string, optional, defaults to `None`
string after which any line is ignored
Returns
-------
`numpy.ndarray` of strings
Examples
--------
>>> fname = os.path.join(_here, 'example_data/table_data.dat')
>>> d = file2strarray(fname)
>>> type(d)
<type 'numpy.ndarray'>
>>> # One can access the elements in the usual `numpy.ndarray` way
>>> d[1, 3]
'4.6774e-04'
>>> print np.shape(d)
(96, 27)
>>> fp = open(fname)
>>> contents = fp.read()
>>> fp.close()
>>> dd = file2strarray(contents, buffer=True)
>>> (d == dd).all()
True
>>> fname = os.path.join(_here,'example_data/table_data_ps.dat')
>>> x = file2strarray(fname, datastring='SN:')
>>> np.shape(x)
(2, 27)
    .. note:: 1. Confirmation of buffer was introduced in order to prevent \
    errors where an incorrectly passed filename was interpreted as a \
    buffer.
"""
# Check if this is a path to a file or a string
if os.path.isfile(file):
fp = open(file)
else:
# this is a string, Check if buffer is true
if not buffer:
            raise ValueError('The file does not exist, and buffer is False, '
                             'so cannot interpret it as a data stream')
fp = cStringIO.StringIO(file)
# line = fp.readline()
# line = line.strip()
data = []
# while line != '':
for i, line in enumerate(fp):
lst = []
line = line.strip()
# probably can get rid of the not line case as it will be trapped by else
if not line:
continue
if datastring is None:
lst = utils.tokenizeline(line, delimitter=delimitter)[0]
# data.append(lst)
elif line.startswith(datastring):
# print 'line ', line
lst = utils.tokenizeline(line, delimitter=delimitter,
prependstring=datastring,
ignorestrings=ignorestring)[0]
if len(lst) > 0:
data.append(lst)
fp.close()
    data = np.asarray(data)
    return data
from __future__ import print_function, division
import matplotlib
#matplotlib.use('Agg')
import numpy as np
import scipy as sp
from operator import truediv
import math, time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from itertools import groupby
import sisl as si
from numbers import Integral
# I don't know why, but the lines below were
# breaking my routine "makeTB_FrameOutside" on the "construct" command
#try:
#    from itertools import izip as zip
#except:
#    pass
def dagger(M):
return np.conjugate(np.transpose(M))
def displaySparse(m, filename, dpi=300):
if not isinstance(m, sp.sparse.coo_matrix):
m = sp.sparse.coo_matrix(m)
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='black')
ax.plot(m.col, m.row, 's', color='white', ms=10)
ax.set_xlim(0, m.shape[1])
ax.set_ylim(0, m.shape[0])
ax.set_aspect('equal')
for spine in ax.spines.values():
spine.set_visible(False)
ax.invert_yaxis()
ax.set_aspect('equal')
ax.set_xticks([])
ax.set_yticks([])
plt.savefig(filename, facecolor='black', edgecolor='black', dpi=dpi)
return ax
def get_potential(TSHS, iio, atoms):
"""
iio: index (0-based) of orbital in basis set (i.e., pz in SZP: iio = 2)
"""
orbs = TSHS.a2o(atoms)+iio
on = TSHS.Hk(dtype=np.float64, format='array')[orbs, orbs]
return on
def check_Dirac(ts, mp, displacement=[0,0,0]):
mp = si.MonkhorstPack(ts, mp, displacement=displacement)
print('Check that Dirac is in here: ')
print(mp.k)
print('Check that this is in *.KP file : {}'.format(mp.tocartesian([0., 1./3, 0]) * si.unit.siesta.unit_convert('Bohr', 'Ang')))
i_dirac = (np.logical_and(mp.k[:,1] == 1./3, mp.k[:,0] == 0.)).nonzero()[0]
if len(i_dirac) != 1:
print('Dirac point is not in the grid')
exit(1)
else:
print('Dirac point is at kindex: {}'.format(i_dirac[0]))
def get_Dirac(hs, mp, displacement=[0,0,0]):
#check_Dirac(hs.geom, mp, displacement)
ens_dirac = hs.eigh(k=[0., 1./3, 0])
i_dirac = hs.na * 2 - 1
return np.average(ens_dirac[i_dirac:i_dirac+2])
def plot_PotDiff(TSHS, TSHS_0, ia, axis, iio, o_dev, o_inner): # include option for frame!
    on, yy, atoms = get_potential_profile(TSHS, ia, axis, iio)  # get_potential takes (TSHS, iio, atoms); the profile variant matches this call signature and 3-tuple return
    on0 = get_potential_profile(TSHS_0, ia, axis, iio)[0]
on0 = np.array([np.mean(on0)]*len(on))
# Check
print('y (Ang)\t\tPot (eV)\tPot0 (eV)\tPot-Pot0 (eV)')
a_dev = TSHS.o2a(o_dev, unique=True)
a_inner = TSHS.o2a(o_inner, unique=True)
for iia, y, o, o0 in zip(atoms, yy, on, on0):
if iia in a_inner:
print('{:7.4f}\t\t{:7.4f}\t\t{:7.4f}\t\t{:7.4f}\t\t(inner)'.format(y,o,o0,o-o0))
else:
print('{:7.4f}\t\t{:7.4f}\t\t{:7.4f}\t\t{:7.4f}'.format(y,o,o0,o-o0))
# Subtract pristine potential
PotDiff = on-on0
# Write to file
with open('PotDiff.dat', 'w') as pf:
for yc, pd in zip(yy, PotDiff):
pf.write('{}\t\t{}\n'.format(yc, pd))
    # Plot (the module imports matplotlib.pyplot as plt, so qualify the calls)
    plt.figure()
    plt.plot(yy, PotDiff, 'b')
    md, Md = np.amin(TSHS.xyz[a_dev, axis]), np.amax(TSHS.xyz[a_dev, axis])
    plt.axvline(md, color='k', linestyle='dashed', linewidth=2)
    plt.axvline(Md, color='k', linestyle='dashed', linewidth=2)
    tmp_dev = TSHS.geom.sub(a_dev); tmp_inner = tmp_dev.sub(a_inner)
    mi, Mi = np.amin(tmp_inner.xyz[a_inner, axis]), np.amax(tmp_inner.xyz[a_inner, axis])
    plt.axvspan(mi, Mi, alpha=0.3, facecolor='blue', edgecolor='none')
    plt.ylabel(r'$H_{p_z}-H^0_{p_z}\, (e{\rm V})$', fontsize=20)
    plt.xlabel(r'$y\, (\AA)$', fontsize=20)
    plt.xlim(0, TSHS.cell[axis, axis])
    #plt.xlim(TSHS.center(what='cell')[1], TSHS.cell[1,1])
    plt.legend(loc=0); plt.savefig('PotDiff.pdf', bbox_inches='tight')
def get_potential_profile(TSHS, ia, axis, iio):
"""
ia: atom crossed by the line
axis: direction of the line
iio: index (0-based) of orbital in basis set (i.e., pz in SZP: iio = 2)
"""
    # Find atoms in a line along `axis` passing through atom `ia`, selecting on
    # the perpendicular in-plane coordinate. The original `axis%1` always
    # evaluated to 0; `(axis+1)%2` is assumed here to be the intended perpendicular axis.
    perp = (axis + 1) % 2
    xyz0, xyz = TSHS.xyz[ia, perp], TSHS.xyz[:, perp]
    atoms = np.where(np.logical_and(xyz0-1.43 < xyz, xyz < xyz0+1.43))[0]
v = TSHS.geom.copy(); v.atom[atoms] = si.Atom(8, R=[1.43]); v.write('checkPot.xyz')
orbs = TSHS.a2o(atoms)+iio
on = TSHS.Hk(dtype=np.float64, format='array')[orbs, orbs]
ylist = TSHS.xyz[atoms, axis]
idxs = np.argsort(ylist)
on, ylist = on[idxs], ylist[idxs]
return on, ylist, atoms
def xyz2polar(tbt, origin=0):
na = tbt.na
# radii from origin
if isinstance(origin, Integral):
origin = tbt.xyz[origin]
_, r = tbt.geom.close_sc(origin, R=np.inf, ret_rij=True)
# angles from origin
transl = tbt.geom.translate(-origin)
y = transl.xyz[:,1]
i_ypos = np.where(y >= 0)[0]
i_yneg = np.setdiff1d(np.arange(na), i_ypos)
t = np.zeros(na)
t[i_ypos] = transl.angle(i_ypos, dir=(1., 0, 0), rad=True)
t[i_yneg] = transl.angle(i_yneg, dir=(-1., 0, 0), rad=True) +np.pi
return r, t
def radial_T_from_bc(tbt, elec, E=None, kavg=True,
origin=0, thetamin=0., thetamax=2*np.pi, ntheta=360,
Rmin=5., Rmax=999999999, dr=40.,
input=None, save='radial_T_from_bc.txt', saveinput='rt.txt'):
if E:
Eidx = tbt.Eindex(E)
en = tbt.E[Eidx]
else:
en = tbt.E[0]
print('Using E = {} eV'.format(en))
na = tbt.na
if isinstance(origin, Integral):
origin = tbt.xyz[origin]
# (x, y) ----> (r, t)
if input:
r, t = np.loadtxt(input, delimiter='\t', usecols=(1, 2), unpack=True, skiprows=1)
else:
r, t = xyz2polar(tbt, origin=origin)
f = open(saveinput, 'w')
f.write('ia\tr (Angstrom)\tangle (radians; center {})\n'.format(origin))
for ia, rr, tt in zip(np.arange(na), r, t):
f.write('{}\t{}\t{}\n'.format(ia, rr, tt))
f.close()
print('(x,y) ---> (r,t): DONE')
# theta bins
thetas = np.linspace(thetamin, thetamax, ntheta, endpoint=False)
dtheta = thetas[1]-thetas[0]
print(len(thetas), dtheta, thetas)
# Digitize t into thetas
inds = np.digitize(t, thetas) -1 # First bin is associated to 0.0 rad
print('Digitize theta: DONE')
# radii[i] is the radius of the interface between 2 crowns centered at the position of the tip
newRmax = np.amin(np.absolute(np.array([origin[0], origin[1],
(origin-tbt.cell[0]-tbt.cell[1])[0], (origin-tbt.cell[0]-tbt.cell[1])[1]])))
radii = np.arange(np.amax([Rmin, dr]), np.amin([Rmax, newRmax])+2*dr, dr)
nradii = len(radii)
print(nradii, dr, radii)
# indices of atom within the various shells
# atoms in list ishell[i] belong to [radii[i], radii[i+1]]
ishell = tbt.geom.close_sc(origin, R=radii, idx=tbt.a_dev)
print('Close: DONE')
# Read bond-current
bc = tbt.bond_current(0, en, kavg=kavg, only='all', uc=True)
print('bc: DONE')
Tavg = np.zeros(ntheta*nradii)
thetas_toplot = Tavg.copy()
radii_toplot = Tavg.copy()
j=0
for id in np.arange(ntheta): # Loop over unique angles
print(' Doing theta #{} of {} ({} rad)'.format(id+1, ntheta, thetas[id]))
idx_intheta = np.where(inds == id)[0] # find indices of atoms whose t is in sector theta
for id_r in np.arange(1,nradii-1): # Loop over unique radii
print(' Doing radius #{} of {} ({} Ang)'.format(id_r, nradii, radii[id_r]))
idx_1_indr = ishell[id_r] # Indices of atoms within internal shell
mask = np.in1d(idx_1_indr, idx_intheta)
idx_1 = idx_1_indr[mask] # Indices of atoms in internal shell AND sector theta
idx_2 = ishell[id_r+1] # # Indices of atoms within external shell
Tavg[j] = bc[idx_1.reshape(-1, 1), idx_2.reshape(1, -1)].sum()
thetas_toplot[j] = thetas[id]
radii_toplot[j] = radii[id_r]
#print(' ({} Ang, {} rad) --> {}'.format(radii_toplot[j], thetas_toplot[j], Tavg[j]))
j+=1
# Write
f = open(save, 'w')
f.write('center {}\n'.format(origin))
f.write('radius (Ang), \t theta (rad), \tT from radial bond current\n')
for rr, theta, ttt in zip(radii_toplot, thetas_toplot, Tavg):
f.write('{}\t{}\t{}\n'.format(rr, theta, ttt))
f.close()
return radii_toplot, thetas_toplot, Tavg
def atom_current_radial(tbt, elec, E, kavg=True, activity=True,
origin=0, thetamin=0., thetamax=2*np.pi, ntheta=360,
Rmin=5., Rmax=999999999, dr=40.,
input=None, save='atom_current_radial.txt', saveinput='ac_input.txt'):
if E:
Eidx = tbt.Eindex(E)
en = tbt.E[Eidx]
else:
en = tbt.E[0]
print('Using E = {} eV'.format(en))
na = tbt.na
if isinstance(origin, Integral):
origin = tbt.xyz[origin]
# (x, y) ----> (r, t)
if input:
r, t, ac = np.loadtxt(input, delimiter='\t', usecols=(1, 2, 3), unpack=True, skiprows=1)
else:
r, t = xyz2polar(tbt, origin=origin)
print('start extraction of atom_current...')
ac = tbt.atom_current(elec, E, kavg, activity)
print('...end extraction of atom_current')
f = open(saveinput, 'w')
f.write('ia\tr (Ang)\tangle (rad; center {})\tatom current\n'.format(origin))
for ia, rr, tt, a in zip(np.arange(na), r, t, ac):
f.write('{}\t{}\t{}\t{}\n'.format(ia, rr, tt, a))
f.close()
print('(x,y) ---> (r,t): DONE')
# theta bins
thetas = np.linspace(thetamin, thetamax, ntheta, endpoint=False)
dtheta = thetas[1]-thetas[0]
print('Thetas entries:')
print(len(thetas), dtheta, thetas)
# Digitize t into thetas
inds = np.digitize(t, thetas) -1 # First bin is associated to 0.0 rad
print('Digitize theta: DONE')
# radii[i] is the radius of the interface between 2 crowns centered at the position of the tip
newRmax = np.amin(np.absolute(np.array([origin[0], origin[1],
(origin-tbt.cell[0]-tbt.cell[1])[0], (origin-tbt.cell[0]-tbt.cell[1])[1]])))
radii = np.arange(np.amax([Rmin, dr]), np.amin([Rmax, newRmax])+dr, dr)
nradii = len(radii)
print('Radii entries:')
print(nradii, dr, radii)
# indices of atom within the various shells
# atoms in list ishell[i] belong to [radii[i], radii[i+1]]
#ishell = tbt.geom.close_sc(origin, R=radii, idx=tbt.a_dev)
#print('Close: DONE')
current_r = np.zeros((nradii, ntheta))
for ir, rr in enumerate(radii): # Loop over unique radii
current_t = np.zeros(ntheta)
counts_t = current_t.copy()
inR = np.where(r < rr)[0]
for id, a in zip(inds[inR], ac[inR]):
current_t[id] += a
counts_t[id] += 1
current_r[ir, :] = np.divide(current_t, counts_t)
# Write
np.savetxt(save, np.transpose(np.vstack([thetas, current_r])), delimiter='\t',
newline='\n', comments='', header=', '.join(str(e) for e in radii))
return radii, thetas, current_r
def plot_LDOS(geom, LDOS, figname='figure.png',
vmin=None, vmax=None):
import matplotlib.collections as collections
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
x, y = geom.xyz[:,0], geom.xyz[:,1]
fig, ax = plt.subplots()
ax.set_aspect('equal')
if vmin is None:
vmin = np.min(LDOS)
if vmax is None:
vmax = np.max(LDOS)
colors = LDOS
area = 15
image = ax.scatter(x, y, c=colors, s=area, marker='o', edgecolors='None', cmap='viridis')
image.set_clim(vmin, vmax)
image.set_array(LDOS)
ax.autoscale()
ax.margins(0.1)
    plt.xlabel(r'$x (\AA)$')
    plt.ylabel(r'$y (\AA)$')
plt.gcf()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
axcb = plt.colorbar(image, cax=cax, format='%1.2f', ticks=[vmin, vmax])
plt.savefig(figname, bbox_inches='tight', transparent=True, dpi=300)
print('Successfully plotted to "{}"'.format(figname))
def CAP(geometry, side, dz_CAP=30, write_xyz=True, zaxis=2):
# Determine orientation
if zaxis == 2:
xaxis, yaxis = 0, 1
elif zaxis == 0:
xaxis, yaxis = 1, 2
elif zaxis == 1:
xaxis, yaxis = 0, 2
# Natural units (see "http://superstringtheory.com/unitsa.html")
hbar = 1
m = 0.511e6 # eV
c = 2.62
print('\nSetting up CAP regions: {}'.format(side))
print('Width of absorbing walls = {} Angstrom'.format(dz_CAP))
Wmax = 100
dH_CAP = si.Hamiltonian(geometry, dtype='complex128')
CAP_list = []
### EDGES
if 'right' in side:
print('Setting at right')
z, y = geometry.xyz[:, xaxis], geometry.xyz[:, yaxis]
z2 = np.max(geometry.xyz[:, xaxis]) + 1.
z1 = z2 - dz_CAP
idx = np.where(np.logical_and(z1 <= z, z < z2))[0]
fz = (4/(c**2)) * ((dz_CAP/(z2-2*z1+z[idx]))**2 + (dz_CAP/(z2-z[idx]))**2 - 2 )
Wz = ((hbar**2)/(2*m)) * (2*np.pi/(dz_CAP/2000))**2 * fz
orbs = dH_CAP.geom.a2o(idx) # if you have just 1 orb per atom, then orb = ia
for orb,wz in zip(orbs, Wz):
dH_CAP[orb, orb] = complex(0, -wz)
CAP_list.append(idx)
#print(list2range_TBTblock(idx))
if 'left' in side:
print('Setting at left')
z, y = geometry.xyz[:, xaxis], geometry.xyz[:, yaxis]
        z2 = np.min(geometry.xyz[:, xaxis])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Module description:
Generate phoneme durations from different aligned labels.
"""
# System imports.
import argparse
from collections import OrderedDict
import glob
import logging
import numpy as np
import os
import sys
from typing import Union, Any, List, Optional, cast
# Third-party imports.
# Local source tree imports.
from idiaptts.misc.normalisation.MeanStdDevExtractor import MeanStdDevExtractor
from idiaptts.misc.utils import makedirs_safe
from idiaptts.src.data_preparation.LabelGen import LabelGen
from idiaptts.src.data_preparation.NpzDataReader import NpzDataReader
class PhonemeDurationLabelGen(NpzDataReader, LabelGen):
"""Load phoneme durations from state-aligned HTK labels."""
# Extension of created duration labels.
ext_durations = ".dur"
# Number of states per phoneme in HTK labels.
num_states = 5
# Divisor for phoneme timings in HTK labels. Only used in gen_data.
min_phoneme_length = 50000
# Divisor for phoneme timings in MFA labels. Only used in gen_data.
frame_length_sec = 0.005
logger = logging.getLogger(__name__)
class Config(NpzDataReader.Config):
def __init__(self, *args,
norm_type: str = NpzDataReader.Config.NormType.MEAN_STDDEV,
load_as_matrix: bool = False,
pad_mode: str = 'constant',
label_type: str = 'full_state_align',
**kwargs) -> None:
super().__init__(*args, norm_type=norm_type, **kwargs)
self.load_as_matrix = load_as_matrix
if load_as_matrix:
assert pad_mode == 'edge', \
"Use \'edge\' pad_mode for duration matrix."
assert norm_type == NpzDataReader.Config.NormType.NONE, \
"Duration matrix should not be normalised."
self.label_type = label_type
def create_reader(self):
reader = PhonemeDurationLabelGen(self)
reader.get_normalisation_params()
return reader
def __init__(self, *args, **kwargs):
if len(args) == 1 \
and isinstance(args[0], PhonemeDurationLabelGen.Config):
config = args[0]
super().__init__(config)
self.load_as_matrix = config.load_as_matrix
self.label_type = config.label_type
self.legacy_getitem = False
else:
# LEGACY support
if len(args) > 0:
dir_labels = args[0]
if len(args) > 1:
self.load_as_matrix = args[1]
else:
self.load_as_matrix = kwargs.get("load_as_matrix", False)
else:
dir_labels = kwargs["dir_labels"]
self.load_as_matrix = kwargs.get("load_as_matrix", False)
super().__init__(
config=PhonemeDurationLabelGen._get_reader_config(
dir_labels, self.load_as_matrix,
label_type="full_state_align"))
self.label_type = "full_state_align"
self.legacy_getitem = True
if type(self.directory) in [tuple, list]:
self.dir_labels = self.directory[0]
else:
self.dir_labels = self.directory
@staticmethod
def _get_reader_config(dir_labels, load_as_matrix, label_type):
if load_as_matrix:
pad_mode = 'edge'
norm_type = NpzDataReader.Config.NormType.NONE
else:
pad_mode="constant"
norm_type = NpzDataReader.Config.NormType.MEAN_STDDEV
return PhonemeDurationLabelGen.Config(name="durations",
directory=dir_labels,
load_as_matrix=load_as_matrix,
pad_mode=pad_mode,
norm_type=norm_type,
label_type=label_type)
def __getitem__(self, id_name):
"""Return the preprocessed sample with the given id_name."""
sample_dict = super().__getitem__(id_name)
if self.legacy_getitem:
# LEGACY support
return sample_dict[self.output_names[0]]
else:
return sample_dict
def load(self, id_name: str):
logging.debug("Load duration for " + id_name)
try:
sample = super().load(id_name)
except FileNotFoundError:
# LEGACY support
id_name = os.path.splitext(os.path.basename(id_name))[0]
file_path = os.path.join(self.directory[0], id_name)
try:
archive = np.load(file_path + ".npz")
sample = archive["dur"]
except FileNotFoundError:
file_path += self.ext_durations
with open(file_path, 'r') as f:
sample = np.fromfile(f, dtype=np.float32)
# TODO: Change to label_state_align as in PhonemeLabelGen.
if self.label_type == "full_state_align":
sample = np.reshape(sample, [-1, self.num_states])
elif self.label_type == "mfa":
sample = sample[:, None]
else:
raise NotImplementedError("Unknown label type {}.".format(
self.label_type))
if sample.ndim < 2:
sample = sample[:, None]
if self.load_as_matrix:
sample = self.convert_to_matrix(sample)
return sample
def convert_to_matrix(self, sample):
# Sample is T x 5 in frames of 50000 * 100 ns = 5 ms.
# TODO: Has to be adapted for different frame shift?
        return self.durations_to_hard_attention_matrix(
            sample.sum(axis=1).astype(int))  # np.int is deprecated/removed in recent NumPy
@staticmethod
def durations_to_hard_attention_matrix(durations):
'''
Code from https://github.com/CSTR-Edinburgh/ophelia/blob/a754abdf54986c31c43c363db1a5f850df06fdc6/utils.py#L188
Take array of durations, return selection matrix to replace A in
attention mechanism.
E.g.:
durations_to_hard_attention_matrix(np.array([3,0,1,2]))
[[1. 1. 1. 0. 0. 0.]
[0. 0. 0. 0. 0. 0.]
[0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 1. 1.]]
'''
num_phones = len(durations)
num_frames = durations.sum()
        A = np.zeros((num_frames, num_phones), dtype=np.float32)
        # Remainder reconstructed from the referenced ophelia implementation:
        # phone i claims the frames its duration spans.
        start = 0
        for i, dur in enumerate(durations):
            end = start + dur
            A[start:end, i] = 1.0
            start = end
        return A
import matplotlib.pyplot as plt
import random
import pickle
from skimage.transform import rotate
from scipy import ndimage
from skimage.util import img_as_ubyte
from joblib import Parallel, delayed
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble.forest import _generate_sample_indices
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from itertools import product
import keras
from keras import layers
from joblib import Parallel, delayed
from multiprocessing import Pool
import tensorflow as tf
from numba import cuda
import sys
sys.path.append("../../proglearn/")
from progressive_learner import ProgressiveLearner
from deciders import SimpleArgmaxAverage
from transformers import TreeClassificationTransformer, NeuralClassificationTransformer
from voters import TreeClassificationVoter, KNNClassificationVoter
def cross_val_data(data_x, data_y, total_cls=10):
x = data_x.copy()
y = data_y.copy()
idx = [np.where(data_y == u)[0] for u in np.unique(data_y)]
for i in range(total_cls):
indx = idx[i]#np.roll(idx[i],(cv-1)*100)
random.shuffle(indx)
if i==0:
train_x1 = x[indx[0:250],:]
train_x2 = x[indx[250:500],:]
train_y1 = y[indx[0:250]]
train_y2 = y[indx[250:500]]
test_x = x[indx[500:600],:]
test_y = y[indx[500:600]]
else:
train_x1 = np.concatenate((train_x1, x[indx[0:250],:]), axis=0)
train_x2 = np.concatenate((train_x2, x[indx[250:500],:]), axis=0)
            train_y1 = np.concatenate((train_y1, y[indx[0:250]]), axis=0)
            train_y2 = np.concatenate((train_y2, y[indx[250:500]]), axis=0)
            test_x = np.concatenate((test_x, x[indx[500:600],:]), axis=0)
            test_y = np.concatenate((test_y, y[indx[500:600]]), axis=0)
    # Remainder reconstructed by mirroring the i==0 branch; the return tuple is an assumption.
    return train_x1, train_y1, train_x2, train_y2, test_x, test_y
# Imports
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from tqdm import tqdm
from math import cos, sin
from scipy.optimize import nnls
from numpy import sin, cos, arctan
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from PyDiffGame import ContinuousPyDiffGame
# Global Constants
g = 9.81
sum_theta_init = -3 / g
Ixx = 7.5e-3
Iyy = 7.5e-3
Izz = 1.3e-2
m = 0.65
l = 0.23
Jr = 6e-5
b = 3.13e-5
d = 7.5e-7
a1 = (Iyy - Izz) / Ixx
a2 = Jr / Ixx
a3 = (Izz - Ixx) / Iyy
a4 = Jr / Iyy
a5 = (Ixx - Iyy) / Izz
b1 = l / Ixx
b2 = l / Iyy
b3 = 1 / Izz
v_d_s_0_2 = 0
h_d_s_0_2 = 0
sum_theta = sum_theta_init
sum_theta_2 = 0
# Low-Level Control
def quad_rotor_state_diff_eqn_for_given_pqrT(X, _, p, q, r, T, Plast):
phi, dPhidt, theta, dThetadt, psi, dPsidt, z, dzdt, x, dxdt, y, dydt = X
u_x = cos(phi) * sin(theta) * cos(psi) + sin(phi) * sin(psi)
u_y = cos(phi) * sin(theta) * sin(psi) - sin(phi) * cos(psi)
K = cos(phi) * cos(theta) / m
U = low_level_angular_rate_controller([dPhidt, dThetadt, dPsidt], p, q, r, T, Plast)
omegas_squared_coeffs = np.array([[b] * 4,
[0, -b, 0, b],
[b, 0, -b, 0],
[-d, d, -d, d]
])
u1, u2, u3, u4 = U
omegas_squared = nnls(omegas_squared_coeffs, np.array([u1, u2, u3, u4]))[0]
omegas = np.sqrt(omegas_squared)
u5 = d * (omegas[0] - omegas[1] + omegas[2] - omegas[3])
dPhiddt = dThetadt * (dPsidt * a1 + u5 * a2) + b1 * u2
dThetaddt = dPhidt * (dPsidt * a3 - u5 * a4) + b2 * u3
dPsiddt = dThetadt * dPhidt * a5 + b3 * u4
dzddt = g - K * u1
dxddt = u_x * u1 / m
dyddt = u_y * u1 / m
return np.array([dPhidt, dPhiddt, dThetadt, dThetaddt, dPsidt, dPsiddt, dzdt,
dzddt, dxdt, dxddt, dydt, dyddt], dtype='float64')
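# Illustrative integration of the dynamics above (reference values p_ref, q_ref,
# r_ref, T_ref are hypothetical; Plast comes from get_P_quad_given_angular_rates):
#   X0 = np.zeros(12)
#   ts = np.linspace(0, 1, 101)
#   sol = odeint(quad_rotor_state_diff_eqn_for_given_pqrT, X0, ts,
#                args=(p_ref, q_ref, r_ref, T_ref, Plast))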
def low_level_angular_rate_controller(x, p, q, r, T, Plast):
B1 = np.array([[b1],
[0],
[0]])
B2 = np.array([[0],
[b2],
[0]])
B3 = np.array([[0],
[0],
[b3]])
R1 = np.array([[0.1]])
R2 = np.array([[0.1]])
R3 = np.array([[0.1]])
B = [B1, B2, B3]
R = [R1, R2, R3]
P_sol = Plast
reduced_X = np.array(x) - np.array([p, q, r])
    reduced_X_tr = reduced_X.T  # '.forward_time' was a rename artifact; the transpose is intended
inv_Rs = [inv(r) for r in R]
B_t = [b.T for b in B]
U_angular = np.array([- r @ b @ p @ reduced_X_tr for r, b, p in zip(inv_Rs, B_t, P_sol)]).reshape(3, )
u2, u3, u4 = U_angular
U = [T, u2, u3, u4]
return U
def get_P_quad_given_angular_rates(x, P_sol):
A = np.array([[0, (1 / 2) * a1 * x[2], (1 / 2) * a1 * x[1]],
[(1 / 2) * a3 * x[2], 0, (1 / 2) * a3 * x[0]],
[(1 / 2) * a5 * x[1], (1 / 2) * a5 * x[0], 0]])
B1 = np.array([[b1],
[0],
[0]])
B2 = np.array([[0],
[b2],
[0]])
B3 = np.array([[0],
[0],
[b3]])
Q1 = np.array([[1000, 0, 0],
[0, 10, 0],
[0, 0, 10]])
Q2 = np.array([[10, 0, 0],
[0, 1000, 0],
[0, 0, 10]])
Q3 = np.array([[10, 0, 0],
[0, 10, 0],
[0, 0, 1000]])
R1 = np.array([[0.1]])
R2 = np.array([[0.1]])
R3 = np.array([[0.1]])
B = [B1, B2, B3]
R = [R1, R2, R3]
Q = [Q1, Q2, Q3]
P = ContinuousPyDiffGame(A=A, B=B, Q=Q, R=R, P_f=P_sol, cl=True, show_legend=False).solve_game_and_plot_state_space()
Plast = P[-1]
return Plast
# High-Level Control
def get_mf_numerator(F3, R11, F1, R31, a_y, R12, R32):
return F3 * R11 - F1 * R31 + a_y * (R12 * R31 - R11 * R32)
def get_mf_denominator(F2, R11, F1, R21, a_y, R22, R12):
return - F2 * R11 + F1 * R21 + a_y * (R11 * R22 - R12 * R21)
def get_mc_numerator(mf_numerator, a_z, R31, R13, R11, R33):
return mf_numerator + a_z * (R13 * R31 - R11 * R33)
def get_mc_denominator(mf_denominator, a_z, R11, R23):
return mf_denominator - a_z * R11 * R23
def hpf_ode_v_d_s(v_d_s, _, f_a, f_b, P_z_tilda):
return f_a * v_d_s + f_b * P_z_tilda
def hpf_ode_h_d_s(h_d_s, _, f_a, f_b, P_y_tilda):
return f_a * h_d_s + f_b * P_y_tilda
def calculate_Bs(u_sizes, dividing_matrix, B):
block_matrix = B @ dividing_matrix
Bs = []
last = 0
for u_size in u_sizes:
Bs += [block_matrix[:, last:u_size + last]]
last = u_size
return Bs
def wall_punishment(wall_distance, a_y):
return 3 * (10 ** 2) * (wall_distance / a_y) ** 2
def get_higher_level_control2(state, st, a_y):
global v_d_s_0_2, h_d_s_0_2, sum_theta, sum_theta_2
# a_y = 1
a_z = -2.5
x = state[8]
y = state[10]
z = state[6]
phi = state[0]
theta = state[2]
psi = state[4]
    sphi = sin(phi)