@@FeedFnHook
@@ProfilerHook
@@SecondOrStepTimer
@@global_step
@@basic_train_loop
@@get_global_step
@@get_or_create_global_step
@@create_global_step
@@assert_global_step
@@write_graph
@@load_checkpoint
@@load_variable
@@list_variables
@@init_from_checkpoint
"""
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import sdca_ops as _sdca_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.util.all_util import remove_undocumented
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.ops.sdca_ops import sdca_optimizer
from tensorflow.python.ops.sdca_ops import sdca_fprint
from tensorflow.python.ops.sdca_ops import sdca_shrink_l1
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adagrad_da import AdagradDAOptimizer
from tensorflow.python.training.proximal_adagrad import ProximalAdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import *
# pylint: enable=wildcard-import
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
from tensorflow.python.training.basic_session_run_hooks import LoggingTensorHook
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverListener
from tensorflow.python.training.basic_session_run_hooks import StepCounterHook
from tensorflow.python.training.basic_session_run_hooks import NanLossDuringTrainingError
from tensorflow.python.training.basic_session_run_hooks import NanTensorHook
from tensorflow.python.training.basic_session_run_hooks import SummarySaverHook
from tensorflow.python.training.basic_session_run_hooks import GlobalStepWaiterHook
from tensorflow.python.training.basic_session_run_hooks import FinalOpsHook
from tensorflow.python.training.basic_session_run_hooks import FeedFnHook
from tensorflow.python.training.basic_session_run_hooks import ProfilerHook
from tensorflow.python.training.basic_loops import basic_train_loop
from tensorflow.python.training.checkpoint_utils import init_from_checkpoint
from tensorflow.python.training.checkpoint_utils import list_variables
from tensorflow.python.training.checkpoint_utils import load_checkpoint
from tensorflow.python.training.checkpoint_utils import load_variable
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.monitored_session import Scaffold
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
from tensorflow.python.training.monitored_session import SessionCreator
from tensorflow.python.training.monitored_session import ChiefSessionCreator
from tensorflow.python.training.monitored_session import WorkerSessionCreator
from tensorflow.python.training.monitored_session import MonitoredSession
from tensorflow.python.training.monitored_session import SingularMonitoredSession
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.saver import checkpoint_exists
from tensorflow.python.training.saver import generate_checkpoint_state_proto
from tensorflow.python.training.saver import get_checkpoint_mtimes
from tensorflow.python.training.saver import get_checkpoint_state
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.python.training.saver import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_run_hook import SessionRunHook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.session_run_hook import SessionRunContext
from tensorflow.python.training.session_run_hook import SessionRunValues
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.training.training_util import get_global_step
from tensorflow.python.training.training_util import assert_global_step
from tensorflow.python.training.training_util import create_global_step
from tensorflow.python.training.training_util import get_or_create_global_step
from tensorflow.python.pywrap_tensorflow import do_quantize_training_on_graphdef
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
# pylint: disable=wildcard-import
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import *
# pylint: enable=wildcard-import
# Distributed computing support.
from tensorflow.core.protobuf.cluster_pb2 import ClusterDef
from tensorflow.core.protobuf.cluster_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server
# Symbols whitelisted for export without documentation.
_allowed_symbols = [
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
"generate_checkpoint_state_proto", # Used internally by saver.
"checkpoint_exists", # Only used in test?
"get_checkpoint_mtimes", # Only used in test?
    # Legacy: remove.
    "do_quantize_training_on_graphdef", # At least use graph_def, not graphdef.
    # No uses within tensorflow.
    "queue_runner", # Use tf.train.start_queue_runners etc. directly.
# This is also imported internally.
# TODO(drpng): document these. The reference in howtos/distributed does
# not link.
"SyncReplicasOptimizer",
# Protobufs:
"BytesList", # from example_pb2.
"ClusterDef",
"Example", # from example_pb2
"Feature", # from example_pb2
"Features", # from example_pb2
"FeatureList", # from example_pb2
"FeatureLists", # from example_pb2
"FloatList", # from example_pb2.
"Int64List", # from example_pb2.
"JobDef",
"SaverDef", # From saver_pb2.
"SequenceExample", # from example_pb2.
"ServerDef",
]
# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
remove_undocumented(__name__, _allowed_symbols,
[_sys.modules[__name__], _io_ops, _sdca_ops, _state_ops])
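# The following is an illustrative sketch only, not part of this module's API:
# it shows how a few of the symbols re-exported above (get_or_create_global_step,
# GradientDescentOptimizer, MonitoredTrainingSession) fit together. The `loss`
# tensor and learning rate are assumed to come from whatever model the caller builds.
def _example_minimal_training_loop(loss, learning_rate=0.01):
  import tensorflow as tf  # deferred import; only resolved if this sketch is called
  step = tf.train.get_or_create_global_step()
  train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
      loss, global_step=step)
  with tf.train.MonitoredTrainingSession() as sess:
    while not sess.should_stop():
      sess.run(train_op)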
import skimage
import skimage.io
import skimage.transform
import numpy as np
# synset = [l.strip() for l in open('synset.txt').readlines()]
# returns image of shape [224, 224, 3]
# [height, width, depth]
def load_image(path):
# load image
img = skimage.io.imread(path)
img = img / 255.0
assert (0 <= img).all() and (img <= 1.0).all()
# print "Original Image Shape: ", img.shape
# we crop image from center
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
# resize to 224, 224
resized_img = skimage.transform.resize(crop_img, (224, 224), mode='constant')
return resized_img
# returns the top1 string
def print_prob(prob, file_path):
synset = [l.strip() for l in open(file_path).readlines()]
# print prob
pred = np.argsort(prob)[::-1]
# Get top1 label
top1 = synset[pred[0]]
print(("Top1: ", top1, prob[pred[0]]))
# Get top5 label
top5 = [(synset[pred[i]], prob[pred[i]]) for i in range(5)]
print(("Top5: ", top5))
return top1
def load_image2(path, height=None, width=None):
# load image
img = skimage.io.imread(path)
img = img / 255.0
if height is not None and width is not None:
ny = height
nx = width
elif height is not None:
ny = height
        nx = int(img.shape[1] * ny / img.shape[0])
    elif width is not None:
        nx = width
        ny = int(img.shape[0] * nx / img.shape[1])
else:
ny = img.shape[0]
nx = img.shape[1]
return skimage.transform.resize(img, (ny, nx), mode='constant')
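# Illustrative sketch only: ties load_image() and print_prob() together.
# "model_fn" is a hypothetical callable (e.g. a VGG forward pass) that maps a
# [224, 224, 3] image to a 1-D probability vector; it is not defined in this file.
def example_classify(image_path, synset_path, model_fn):
    img = load_image(image_path)          # centered crop, resized to 224x224, values in [0, 1]
    prob = model_fn(img)                  # hypothetical model call
    return print_prob(prob, synset_path)  # prints and returns the top-1 label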
def test():
img = skimage.io.imread("./test_data/starry_night.jpg")
ny = 300
    nx = int(img.shape[1] * ny / img.shape[0])
img = skimage.transform.resize(img, (ny, nx), mode='constant')
skimage.io.imsave("./test_data/test/output.jpg", img)
if __name__ == "__main__":
test()
from .util import threading
import logging
log = logging.getLogger(__name__)
class LockError(Exception):
pass
class ReadWriteMutex(object):
"""A mutex which allows multiple readers, single writer.
:class:`.ReadWriteMutex` uses a Python ``threading.Condition``
to provide this functionality across threads within a process.
The Beaker package also contained a file-lock based version
of this concept, so that readers/writers could be synchronized
across processes with a common filesystem. A future Dogpile
release may include this additional class at some point.
"""
def __init__(self):
# counts how many asynchronous methods are executing
        self.asynchronous = 0
# pointer to thread that is the current sync operation
self.current_sync_operation = None
# condition object to lock on
self.condition = threading.Condition(threading.Lock())
def acquire_read_lock(self, wait = True):
"""Acquire the 'read' lock."""
self.condition.acquire()
try:
# see if a synchronous operation is waiting to start
# or is already running, in which case we wait (or just
# give up and return)
if wait:
while self.current_sync_operation is not None:
self.condition.wait()
else:
if self.current_sync_operation is not None:
return False
            self.asynchronous += 1
log.debug("%s acquired read lock", self)
finally:
self.condition.release()
if not wait:
return True
def release_read_lock(self):
"""Release the 'read' lock."""
self.condition.acquire()
try:
            self.asynchronous -= 1
# check if we are the last asynchronous reader thread
# out the door.
            if self.asynchronous == 0:
# yes. so if a sync operation is waiting, notifyAll to wake
# it up
if self.current_sync_operation is not None:
self.condition.notifyAll()
            elif self.asynchronous < 0:
raise LockError("Synchronizer error - too many "
"release_read_locks called")
log.debug("%s released read lock", self)
finally:
self.condition.release()
def acquire_write_lock(self, wait = True):
"""Acquire the 'write' lock."""
self.condition.acquire()
try:
# here, we are not a synchronous reader, and after returning,
# assuming waiting or immediate availability, we will be.
if wait:
# if another sync is working, wait
while self.current_sync_operation is not None:
self.condition.wait()
else:
# if another sync is working,
# we dont want to wait, so forget it
if self.current_sync_operation is not None:
return False
# establish ourselves as the current sync
# this indicates to other read/write operations
# that they should wait until this is None again
self.current_sync_operation = threading.currentThread()
# now wait again for asyncs to finish
            if self.asynchronous > 0:
if wait:
# wait
self.condition.wait()
else:
# we dont want to wait, so forget it
self.current_sync_operation = None
return False
log.debug("%s acquired write lock", self)
finally:
self.condition.release()
if not wait:
return True
def release_write_lock(self):
"""Release the 'write' lock."""
self.condition.acquire()
try:
if self.current_sync_operation is not threading.currentThread():
raise LockError("Synchronizer error - current thread doesn't "
"have the write lock")
# reset the current sync operation so
# another can get it
self.current_sync_operation = None
# tell everyone to get ready
self.condition.notifyAll()
log.debug("%s released write lock", self)
finally:
# everyone go !!
self.condition.release()
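def _example_usage():
    # Illustrative sketch only (not used by the library): the calling pattern the
    # class docstring describes. Any number of threads may hold the read lock at
    # once; a writer blocks until every reader has released, and vice versa.
    mutex = ReadWriteMutex()
    mutex.acquire_read_lock()
    try:
        pass  # read shared state here; other readers may run concurrently
    finally:
        mutex.release_read_lock()
    mutex.acquire_write_lock()
    try:
        pass  # mutate shared state here; this thread is the only one running
    finally:
        mutex.release_write_lock()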
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import __builtin__
import StringIO
from nova import test
from nova.virt.baremetal import tilera
class TileraBareMetalNodesTestCase(test.TestCase):
def setUp(self):
super(TileraBareMetalNodesTestCase, self).setUp()
self.board_info = "\n".join([
'# board_id ip_address mac_address 00:1A:CA:00:57:90 '
'00:1A:CA:00:58:98 00:1A:CA:00:58:50',
'6 10.0.2.7 00:1A:CA:00:58:5C 10 16218 917 476 1 tilera_hv 1 '
'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
'"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
'"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
'"topology":{"cores":"64"}}',
'7 10.0.2.8 00:1A:CA:00:58:A4 10 16218 917 476 1 tilera_hv 1 '
'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
'"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
'"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
'"topology":{"cores":"64"}}',
'8 10.0.2.9 00:1A:CA:00:58:1A 10 16218 917 476 1 tilera_hv 1 '
'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
'"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
'"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
'"topology":{"cores":"64"}}',
'9 10.0.2.10 00:1A:CA:00:58:38 10 16385 1000 0 0 tilera_hv 1 '
'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
'"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
'"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
'"topology":{"cores":"64"}}'])
def tearDown(self):
super(TileraBareMetalNodesTestCase, self).tearDown()
# Reset the singleton state
tilera.BareMetalNodes._instance = None
tilera.BareMetalNodes._is_init = False
def test_singleton(self):
"""Confirm that the object acts like a singleton.
In this case, we check that it only loads the config file once,
        even though it has been instantiated multiple times."""
self.mox.StubOutWithMock(__builtin__, 'open')
open("/tftpboot/tilera_boards",
"r").AndReturn(StringIO.StringIO(self.board_info))
self.mox.ReplayAll()
nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
def test_get_hw_info(self):
self.mox.StubOutWithMock(__builtin__, 'open')
open("/tftpboot/tilera_boards",
"r").AndReturn(StringIO.StringIO(self.board_info))
self.mox.ReplayAll()
nodes = tilera.BareMetalNodes()
self.assertEqual(nodes.get_hw_info('vcpus'), 10)
import os
import urlparse
from StringIO import StringIO
blacklist = ["/", "/tools/", "/resources/", "/common/", "/conformance-checkers/", "_certs"]
def rel_path_to_url(rel_path, url_base="/"):
assert not os.path.isabs(rel_path)
if url_base[0] != "/":
url_base = "/" + url_base
if url_base[-1] != "/":
url_base += "/"
return url_base + rel_path.replace(os.sep, "/")
def is_blacklisted(url):
for item in blacklist:
if item == "/":
if "/" not in url[1:]:
return True
elif url.startswith(item):
return True
return False
class ContextManagerStringIO(StringIO):
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
class cached_property(object):
def __init__(self, func):
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = func.__name__
def __get__(self, obj, cls=None):
if obj is None:
return self
if self.name not in obj.__dict__:
obj.__dict__[self.name] = self.func(obj)
return obj.__dict__[self.name]
""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-turkish',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
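# Illustrative sketch only (not emitted by gencodec.py): round-tripping a single
# byte through the table-driven Codec above. Per the decoding table below,
# 0x80 maps to U+00C4 (LATIN CAPITAL LETTER A WITH DIAERESIS).
def _example_roundtrip():
    codec = Codec()
    decoded, _ = codec.decode(b'\x80')   # -> u'\xc4'
    encoded, _ = codec.encode(decoded)   # -> the 0x80 byte again, via encoding_table
    return decoded, encoded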
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE
u'\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\uf8a0' # 0xF5 -> undefined1
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json,base64
from resources.lib.modules import cache
from resources.lib.modules import control
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['ororo.tv']
self.base_link = 'https://ororo.tv'
self.moviesearch_link = '/api/v2/movies'
self.tvsearch_link = '/api/v2/shows'
self.movie_link = '/api/v2/movies/%s'
self.show_link = '/api/v2/shows/%s'
self.episode_link = '/api/v2/episodes/%s'
self.user = control.setting('ororo.user')
self.password = control.setting('ororo.pass')
self.headers = {
'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (self.user, self.password)),
'User-Agent': 'Exodus for Kodi'
}
def movie(self, imdb, title, localtitle, aliases, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_moviecache, 60, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
            url = self.movie_link % url
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_tvcache, 120, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
            url = self.show_link % url
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if (self.user == '' or self.password == ''): raise Exception()
            if url is None: return
url = urlparse.urljoin(self.base_link, url)
r = client.request(url, headers=self.headers)
r = json.loads(r)['episodes']
r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
url = [i for i in r if season == '%01d' % int(i[1]) and episode == '%01d' % int(i[2])]
url += [i for i in r if premiered == i[3]]
            url = self.episode_link % url[0][0]
return url
except:
return
def ororo_moviecache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.moviesearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['movies']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def ororo_tvcache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.tvsearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['shows']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
            if url is None: return sources
if (self.user == '' or self.password == ''): raise Exception()
url = urlparse.urljoin(self.base_link, url)
url = client.request(url, headers=self.headers)
url = json.loads(url)['url']
sources.append({'source': 'ororo', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
from subprocess import Popen, PIPE, call
import re
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales.
description:
- Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
author: "Augustus Kling (@AugustusKling)"
options:
name:
description:
- Name and encoding of the locale, such as "en_GB.UTF-8".
required: true
default: null
aliases: []
state:
description:
- Whether the locale shall be present.
required: false
choices: ["present", "absent"]
default: "present"
'''
EXAMPLES = '''
# Ensure a locale exists.
- locale_gen: name=de_CH.UTF-8 state=present
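# Remove a locale again (illustrative; "fr_FR.UTF-8" is only an example value,
# the states themselves are the ones documented above).
- locale_gen: name=fr_FR.UTF-8 state=absent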
'''
LOCALE_NORMALIZATION = {
".utf8": ".UTF-8",
".eucjp": ".EUC-JP",
".iso885915": ".ISO-8859-15",
".cp1251": ".CP1251",
".koi8r": ".KOI8-R",
".armscii8": ".ARMSCII-8",
".euckr": ".EUC-KR",
".gbk": ".GBK",
".gb18030": ".GB18030",
".euctw": ".EUC-TW",
}
# ===========================================
# location module specific support methods.
#
def is_available(name, ubuntuMode):
"""Check if the given locale is available on the system. This is done by
    checking either:
    * if the locale is present in /etc/locale.gen
    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
if ubuntuMode:
        __regexp = '^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/usr/share/i18n/SUPPORTED'
else:
        __regexp = '^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/etc/locale.gen'
re_compiled = re.compile(__regexp)
    fd = open(__locales_available, 'r')
    try:
        for line in fd:
            result = re_compiled.match(line)
            if result and result.group('locale') == name:
                return True
    finally:
        fd.close()
    return False
def is_present(name):
"""Checks if the given locale is currently installed."""
output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
return any(fix_case(name) == fix_case(line) for line in output.splitlines())
def fix_case(name):
"""locale -a might return the encoding in either lower or upper case.
Passing through this function makes them uniform for comparisons."""
for s, r in LOCALE_NORMALIZATION.iteritems():
name = name.replace(s, r)
return name
def replace_line(existing_line, new_line):
"""Replaces lines in /etc/locale.gen"""
try:
f = open("/etc/locale.gen", "r")
lines = [line.replace(existing_line, new_line) for line in f]
finally:
f.close()
try:
f = open("/etc/locale.gen", "w")
f.write("".join(lines))
finally:
f.close()
def set_locale(name, enabled=True):
""" Sets the state of the locale. Defaults to enabled. """
    search_string = '#{0,1}\s*%s (?P<charset>.+)' % name
if enabled:
        new_string = '%s \g<charset>' % (name)
else:
        new_string = '# %s \g<charset>' % (name)
try:
f = open("/etc/locale.gen", "r")
lines = [re.sub(search_string, new_string, line) for line in f]
finally:
f.close()
try:
f = open("/etc/locale.gen", "w")
f.write("".join(lines))
finally:
f.close()
def apply_change(targetState, name):
"""Create or remove locale.
Keyword arguments:
targetState -- Desired state, either present or absent.
name -- Name including encoding such as de_CH.UTF-8.
"""
if targetState=="present":
# Create locale.
set_locale(name, enabled=True)
else:
# Delete locale.
set_locale(name, enabled=False)
localeGenExitValue = call("locale-gen")
if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
def apply_change_ubuntu(targetState, name):
"""Create or remove locale.
Keyword arguments:
targetState -- Desired state, either present or absent.
name -- Name including encoding such as de_CH.UTF-8.
"""
if targetState=="present":
# Create locale.
# Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
localeGenExitValue = call(["locale-gen", name])
else:
# Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
try:
f = open("/var/lib/locales/supported.d/local", "r")
content = f.readlines()
finally:
f.close()
try:
f = open("/var/lib/locales/supported.d/local", "w")
for line in content:
locale, charset = line.split(' ')
if locale != name:
f.write(line)
finally:
f.close()
# Purge locales and regenerate.
# Please provide a patch if you know how to avoid regenerating the locales to keep!
localeGenExitValue = call(["locale-gen", "--purge"])
if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['present','absent'], default='present'),
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
if not os.path.exists("/etc/locale.gen"):
if os.path.exists("/var/lib/locales/supported.d/"):
# Ubuntu created its own system to manage locales.
ubuntuMode = True
else:
module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
else:
# We found the common way to manage locales.
ubuntuMode = False
if not is_available(name, ubuntuMode):
        module.fail_json(msg="The locale you've entered is not available "
                             "on your system.")
if is_present(name):
prev_state = "present"
else:
prev_state = "absent"
changed = (prev_state!=state)
if module.check_mode:
module.exit_json(changed=changed)
else:
if changed:
try:
if ubuntuMode==False:
apply_change(state, name)
else:
apply_change_ubuntu(state, name)
except EnvironmentError, e:
module.fail_json(msg=e.strerror, exitValue=e.errno)
module.exit_json(name=name, changed=changed, msg="OK")
# import module snippets
from ansible.module_utils.basic import *
main()
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop ROL_R_I
{
roli reg, reg, imm, flags=(OF,CF)
};
def macroop ROL_M_I
{
ldst t1, seg, sib, disp
roli t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
roli t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROL_1_R
{
roli reg, reg, 1, flags=(OF,CF)
};
def macroop ROL_1_M
{
ldst t1, seg, sib, disp
roli t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
roli t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROL_R_R
{
rol reg, reg, regm, flags=(OF,CF)
};
def macroop ROL_M_R
{
ldst t1, seg, sib, disp
rol t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
rol t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROR_R_I
{
rori reg, reg, imm, flags=(OF,CF)
};
def macroop ROR_M_I
{
ldst t1, seg, sib, disp
rori t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
rori t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROR_1_R
{
rori reg, reg, 1, flags=(OF,CF)
};
def macroop ROR_1_M
{
ldst t1, seg, sib, disp
rori t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
rori t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROR_R_R
{
ror reg, reg, regm, flags=(OF,CF)
};
def macroop ROR_M_R
{
ldst t1, seg, sib, disp
ror t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
ror t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCL_R_I
{
rcli reg, reg, imm, flags=(OF,CF)
};
def macroop RCL_M_I
{
ldst t1, seg, sib, disp
rcli t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
rcli t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCL_1_R
{
rcli reg, reg, 1, flags=(OF,CF)
};
def macroop RCL_1_M
{
ldst t1, seg, sib, disp
rcli t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
rcli t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCL_R_R
{
rcl reg, reg, regm, flags=(OF,CF)
};
def macroop RCL_M_R
{
ldst t1, seg, sib, disp
rcl t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
rcl t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCR_R_I
{
rcri reg, reg, imm, flags=(OF,CF)
};
def macroop RCR_M_I
{
ldst t1, seg, sib, disp
rcri t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
rcri t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCR_1_R
{
rcri reg, reg, 1, flags=(OF,CF)
};
def macroop RCR_1_M
{
ldst t1, seg, sib, disp
rcri t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
rcri t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCR_R_R
{
rcr reg, reg, regm, flags=(OF,CF)
};
def macroop RCR_M_R
{
ldst t1, seg, sib, disp
rcr t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
rcr t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
'''
# orm/interfaces.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
#
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals,
as well as the now-deprecated ORM extension classes.
Other than the deprecated extensions, this module and the
classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
from .. import util
from ..sql import operators
from .base import (ONETOMANY, MANYTOONE, MANYTOMANY,
EXT_CONTINUE, EXT_STOP, NOT_EXTENSION)
from .base import (InspectionAttr, InspectionAttrInfo, _MappedAttribute)
import collections
from .. import inspect
# imported later
MapperExtension = SessionExtension = AttributeExtension = None
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ONETOMANY',
'MANYTOMANY',
'MANYTOONE',
'NOT_EXTENSION',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'SessionExtension',
'StrategizedProperty',
)
class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""Represent a particular class attribute mapped by :class:`.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
'_configure_started', '_configure_finished', 'parent', 'key',
'info'
)
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(self, context, path,
mapper, result, adapter, populators):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \\
ColumnProperty,\\
CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity'
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _query_clause_element(self):
return self.__clause_element__()
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
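# --- Hedged usage sketch (not part of the original module) ---
# The any()/has() comparators above delegate to operate() and, at the ORM
# level, typically render as EXISTS subqueries.  Against hypothetical mapped
# classes Parent/Child (names assumed for this sketch only):
#
#     from sqlalchemy.orm import Session
#     session = Session()
#     # collection attribute: Parent.children is a one-to-many relationship
#     session.query(Parent).filter(Parent.children.any(Child.name == "foo"))
#     # scalar reference: Child.parent is a many-to-one relationship
#     session.query(Child).filter(Child.parent.has(Parent.name == "bar"))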
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = '_strategies', 'strategy'
strategy_wildcard_key = None
def _get_context_loader(self, context, path):
load = None
# use EntityRegistry.__getitem__()->PropRegistry here so
# that the path is stated in terms of our base
search_path = dict.__getitem__(path, self)
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
cls = self._strategy_lookup(*key)
self._strategies[key] = self._strategies[
cls] = strategy = cls(self)
return strategy
def _get_strategy_by_cls(self, cls):
return self._get_strategy(cls._strategy_keys[0])
def setup(
self, context, entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(context, entity, path, loader, adapter, **kwargs)
def create_row_processor(
self, context, path, mapper,
result, adapter, populators):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context, path, loader,
mapper, result, adapter, populators)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy_by_cls(self.strategy_class)
def post_instrument_class(self, mapper):
if not self.parent.non_primary and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if '_strategy_keys' not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, *key):
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
raise Exception("can't locate strategy for %s %s" % (cls, key))
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically used during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. This
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = 'parent_property', 'is_class_level', 'parent', 'key'
def __init__(self, parent):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, loadopt, adapter, **kwargs):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(self, context, path, loadopt, mapper,
result, adapter, populators):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCDHW" format is only supported on CUDA.
test_configs += [("NCDHW", True)]
return test_configs
class Conv3DTest(test.TestCase):
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
padding, data_format, use_gpu):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with an array containing incrementing
# numbers starting from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
data_format=data_format)
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
return conv
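# --- Hedged standalone sketch (illustration only; mirrors the helper above) ---
# Outside the test harness, the same graph could be built directly, e.g.:
#
#     t1 = constant_op.constant([1.0] * 18, shape=[1, 2, 3, 1, 3])   # input
#     t2 = constant_op.constant([1.0] * 9, shape=[1, 1, 1, 3, 3])    # filter
#     out = nn_ops.conv3d(t1, t2, strides=[1, 1, 1, 1, 1], padding="VALID")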
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
results = []
for data_format, use_gpu in GetTestConfigs():
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_format,
use_gpu=use_gpu)
results.append(result)
tolerance = 1e-2 if use_gpu else 1e-5
with self.test_session() as sess:
values = sess.run(results)
for value in values:
print("expected = ", expected)
print("actual = ", value)
self.assertAllClose(expected, value.flatten(), atol=tolerance,
rtol=1e-6)
def testConv3D1x1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [
19554., 19962., 20370., 22110., 22590., 23070., 34890., 35730., 36570.,
37446., 38358., 39270., 50226., 51498., 52770., 52782., 54126., 55470.
]
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1,
padding="VALID",
expected=expected_output)
def testConv3DStrides(self):
expected_output = [
102., 151., 172., 193., 214., 235., 142.,
438., 592., 613., 634., 655., 676., 394.,
774., 1033., 1054., 1075., 1096., 1117., 646.,
1894., 2503., 2524., 2545., 2566., 2587., 1486.,
2230., 2944., 2965., 2986., 3007., 3028., 1738.,
2566., 3385., 3406., 3427., 3448., 3469., 1990.,
3686., 4855., 4876., 4897., 4918., 4939., 2830.,
4022., 5296., 5317., 5338., 5359., 5380., 3082.,
4358., 5737., 5758., 5779., 5800., 5821., 3334.,
]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
filter_in_sizes=[1, 2, 3, 1, 1],
stride=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [19554., 19962., 20370., 50226., 51498., 52770.]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
36564., 38022., 39480., 37824., 39354., 40884., 39084., 40686., 42288.,
46644., 48678., 50712., 47904., 50010., 52116., 49164., 51342., 53520.,
107124., 112614., 118104., 108384., 113946., 119508., 109644., 115278.,
120912., 117204., 123270., 129336., 118464., 124602., 130740., 119724.,
125934., 132144.
]
self._VerifyValues(
tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
19554., 19962., 20370., 10452., 10710., 10968., 50226., 51498., 52770.,
23844., 24534., 25224.
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [1., 3., 7., 9., 19., 21., 25., 27.]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [
1484., 1592., 770., 2240., 2348., 1106., 1149., 1191., 539., 6776.,
6884., 3122., 7532., 7640., 3458., 3207., 3249., 1421., 3005., 3035.,
1225., 3215., 3245., 1309., 1013., 1022., 343.
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [1484., 1592., 2240., 2348., 6776., 6884., 7532., 7640.]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def testKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 2, 1],
filter_in_sizes=[2, 1, 2, 1, 2],
stride=1,
padding="VALID",
expected=[50, 60])
def _ConstructAndTestGradientForConfig(
self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
padding, test_input, data_format, use_gpu):
input_planes, input_rows, input_cols = input_shape
filter_planes, filter_rows, filter_cols = filter_shape
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [
filter_planes, filter_rows, filter_cols, in_depth, out_depth
]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
output_planes = int(
math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
output_rows = int(
math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
output_cols = int(
math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
output_cols = int(math.ceil(float(input_cols) / strides[3]))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
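# Worked example of the shape arithmetic above (illustration only): with
# input_planes = 3, filter_planes = 3 and stride 1, VALID padding gives
# output_planes = ceil((3 - 3 + 1) / 1) = 1, while SAME padding with
# stride 2 gives output_planes = ceil(3 / 2) = 2.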
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
if test.is_gpu_available() and use_gpu:
data_type = dtypes.float32
# TODO(mjanusz): Modify gradient_checker to also provide max relative
# error and synchronize the tolerance levels between the tests for forward
# and backward computations.
if test.is_gpu_available():
tolerance = 5e-3
else:
# As of Aug 2016, higher tolerance is needed for some CPU architectures.
# Runs on a single machine can also generate slightly different errors
# because of multithreading.
tolerance = 8e-3
else:
data_type = dtypes.float64
tolerance = 1e-8
with self.test_session(use_gpu=use_gpu):
orig_input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
if data_format == "NCDHW":
input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
strides = test_util.NHWCToNCHW(strides)
else:
input_tensor = orig_input_tensor
conv = nn_ops.conv3d(
input_tensor, filter_tensor, strides, padding,
data_format=data_format, name="conv")
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
if test_input:
err = gradient_checker.compute_gradient_error(orig_input_tensor,
input_shape,
conv, output_shape)
else:
err = gradient_checker.compute_gradient_error(filter_tensor,
filter_shape, conv,
output_shape)
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(data_format=data_format,
use_gpu=use_gpu, **kwargs)
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 5),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(7, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 7, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 2, 2),
filter_shape=(3, 2, 1),
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(7, 3, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 3, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=True)
def testFilterGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=False)
if __name__ == "__main__":
test.main()
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2015 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Representation of Fully Qualified Domain Names """
from datetime import datetime
from six import string_types
from sqlalchemy import (Integer, DateTime, Sequence, Column, ForeignKey,
UniqueConstraint)
from sqlalchemy.orm import relation, deferred
from sqlalchemy.orm.session import Session
from aquilon.exceptions_ import InternalError, ArgumentError
from aquilon.aqdb.model import Base, DnsDomain, DnsEnvironment
from aquilon.aqdb.model.base import _raise_custom
from aquilon.aqdb.model.dns_domain import parse_fqdn
from aquilon.aqdb.column_types import AqStr
_TN = "fqdn"
class Fqdn(Base):
__tablename__ = _TN
_instance_label = 'fqdn'
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
name = Column(AqStr(63), nullable=False)
dns_domain_id = Column(ForeignKey(DnsDomain.id), nullable=False)
dns_environment_id = Column(ForeignKey(DnsEnvironment.id),
nullable=False, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
dns_domain = relation(DnsDomain, innerjoin=True)
dns_environment = relation(DnsEnvironment, innerjoin=True)
__table_args__ = (UniqueConstraint(dns_domain_id, name, dns_environment_id,
name='%s_domain_name_env_uk' % _TN),
{'info': {'unique_fields': ['dns_environment',
'dns_domain', 'name']}})
@property
def fqdn(self):
return self.name + '.' + self.dns_domain.name
@classmethod
def get_unique(cls, session, fqdn=None, dns_environment=None, name=None,
dns_domain=None, **kwargs):
if fqdn:
if name or dns_domain: # pragma: no cover
raise TypeError("fqdn and name/dns_domain should not be mixed")
(name, dns_domain) = parse_fqdn(session, fqdn)
if not isinstance(dns_environment, DnsEnvironment):
dns_environment = DnsEnvironment.get_unique_or_default(session,
dns_environment)
return super(Fqdn, cls).get_unique(session, name=name,
dns_domain=dns_domain,
dns_environment=dns_environment,
**kwargs)
@classmethod
def get_or_create(cls, session, dns_environment=None, preclude=False,
ignore_name_check=False, query_options=None, **kwargs):
fqdn = cls.get_unique(session, dns_environment=dns_environment,
query_options=query_options, **kwargs)
if fqdn:
if preclude:
_raise_custom(preclude, ArgumentError,
"{0} already exists.".format(fqdn))
return fqdn
if not isinstance(dns_environment, DnsEnvironment):
dns_environment = DnsEnvironment.get_unique_or_default(session,
dns_environment)
fqdn = cls(session=session, dns_environment=dns_environment,
ignore_name_check=ignore_name_check, **kwargs)
session.add(fqdn)
return fqdn
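# --- Hedged usage sketch (not part of the original module) ---
# Typical call, assuming a session and a DNS environment name that exist in
# the database (values are made up for illustration):
#
#     fqdn = Fqdn.get_or_create(session, fqdn="host-01.example.com",
#                               dns_environment="internal")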
@classmethod
def check_name(cls, name, dns_domain, ignore_name_check=False):
""" Validate the name parameter """
if not isinstance(name, string_types): # pragma: no cover
raise TypeError("%s: name must be a string." % cls.name)
if not isinstance(dns_domain, DnsDomain): # pragma: no cover
raise TypeError("%s: dns_domain must be a DnsDomain." % cls.name)
# Allow SRV records to opt out from this test
if not ignore_name_check:
DnsDomain.check_label(name)
# The limit for DNS name length is 255, assuming wire format. This
# translates to 253 for simple ASCII text; see:
# http://www.ops.ietf.org/lists/namedroppers/namedroppers.2003/msg00964.html
if len(name) + 1 + len(dns_domain.name) > 253:
raise ArgumentError('The fully qualified domain name is too long.')
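# Hedged worked example for the length check above (names are made up):
# name "host-01" in dns_domain "example.com" gives 7 + 1 + 11 = 19 <= 253,
# so it passes; a 250-character label in the same domain would exceed 253
# and raise ArgumentError.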
def _check_session(self, session):
if not session or not isinstance(session, Session): # pragma: no cover
raise InternalError("%s needs a session." % self._get_class_label())
def __init__(self, session=None, name=None, dns_domain=None, fqdn=None,
dns_environment=None, ignore_name_check=False, **kwargs):
if fqdn:
if name or dns_domain: # pragma: no cover
raise TypeError("fqdn and name/dns_domain should not be mixed")
self._check_session(session)
(name, dns_domain) = parse_fqdn(session, fqdn)
self.check_name(name, dns_domain, ignore_name_check)
if not isinstance(dns_environment, DnsEnvironment):
self._check_session(session)
dns_environment = DnsEnvironment.get_unique_or_default(session,
dns_environment)
super(Fqdn, self).__init__(name=name, dns_domain=dns_domain,
dns_environment=dns_environment, **kwargs)
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import unittest2 as unittest
from mpp.lib.gplog import GpLog, GpLogException, _DEFAULT_OUT_FILE
from mpp.lib.PSQL import PSQL
class GpLogRegressionTests(unittest.TestCase):
def test_gather_log_to_default_file(self):
if os.path.exists(_DEFAULT_OUT_FILE):
os.remove(_DEFAULT_OUT_FILE)
self.assertFalse(os.path.exists(_DEFAULT_OUT_FILE))
start_time = time.time()
PSQL.run_sql_command("select pg_sleep(2)")
end_time = time.time()
GpLog.gather_log(start_time=start_time, end_time=end_time)
self.assertTrue(os.path.exists(_DEFAULT_OUT_FILE))
self.assertTrue(os.path.getsize(_DEFAULT_OUT_FILE) > 0)
def test_gather_log_out_file(self):
out_file = '/tmp/cluster2.logs'
if os.path.exists(out_file):
os.remove(out_file)
self.assertFalse(os.path.exists(out_file))
start_time = time.time()
time.sleep(2)
end_time = time.time()
GpLog.gather_log(start_time=start_time, end_time=end_time, out_file=out_file)
self.assertTrue(os.path.exists(out_file))
self.assertTrue(os.path.getsize(out_file) > 0)
def test_check_log(self):
start_time = time.time()
PSQL.run_sql_command("SELECT * from some_table_that_does_not_exist_to_generate_errors_in_logs")
time.sleep(2)
end_time = time.time()
self.assertTrue(GpLog.check_log_for_errors(start_time, end_time))
# -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
from collections import deque
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["shlex", "split"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, basestring):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print 'shlex: reading from %s, line %d' \
% (self.instream, self.lineno)
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print "shlex: pushing token " + repr(tok)
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, basestring):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print 'shlex: pushing to file %s' % (self.infile,)
else:
print 'shlex: pushing to stream %s' % (self.instream,)
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print 'shlex: popping to %s, line %d' \
% (self.instream, self.lineno)
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print "shlex: popping token " + repr(tok)
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print "shlex: token=" + repr(raw)
else:
print "shlex: token=EOF"
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print "shlex: in state", repr(self.state), \
"I see character:", repr(nextchar)
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in whitespace state"
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in quotes state"
# XXX what error should be raised here?
raise ValueError, "No closing quotation"
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in escape state"
# XXX what error should be raised here?
raise ValueError, "No escaped character"
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in word state"
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print "shlex: I see punctuation in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print "shlex: raw token=" + repr(result)
else:
print "shlex: raw token=EOF"
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False):
lex = shlex(s, posix=True)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
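# Hedged illustration of split() above (standard shlex behavior):
#     split('a "b c" d')    -> ['a', 'b c', 'd']
#     split('x # note')     -> ['x', '#', 'note']   # comments are off by default
#     split('x # note', comments=True) -> ['x']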
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print "Token: " + repr(tt)
else:
break
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interactively generate sample text for a font.
Often in bug reports people will describe text using character names.
Replicating the sample text they describe can be a bit tedious. This
lets you interactively search characters in the font by name to assemble
a string and save it to a file."""
import argparse
import codecs
import readline
from nototools import coverage
from nototools import unicode_data
from fontTools import ttLib
def _help():
print ('enter a string to match or one of the following:\n'
' \'quit\' to exit,\n'
' \'help\' to show these options,\n'
' \'names\' for names,\n'
' \'dump\' to dump the current text,\n'
' \'clear\' to clear the current text,\n'
' \'write\' to be prompted for a filename to write the text to.')
def _build_text(name_map, initial_text=''):
text = initial_text
print 'build text using map of length %d' % len(name_map)
while True:
line = raw_input('> ')
if not line:
continue
if line == 'quit':
break
if line == 'help':
_help()
continue
if line == 'names':
print 'names:\n ' + '\n '.join(sorted(name_map.keys()))
continue
if line == 'dump':
print 'dump: \'%s\'' % text
for cp in text:
print '%06x %s' % (ord(cp), unicode_data.name(ord(cp)))
continue
if line == 'clear':
text = ''
continue
if line == 'write':
line = raw_input('file name> ')
if line:
_write_text(line, text)
continue
matches = []
for name, cp in sorted(name_map.iteritems()):
if line in name:
matches.append(name)
if not matches:
print 'no match for "%s"' % line
continue
if len(matches) == 1:
print matches[0]
text += unichr(name_map[matches[0]])
continue
# if we match a full line, then use that
if line in matches:
print line
text += unichr(name_map[line])
continue
new_matches = []
for m in matches:
if line in m.split(' '):
new_matches.append(m)
# if we match a full word, and only one line has this full word, use that
if len(new_matches) == 1:
print new_matches[0]
text += unichr(name_map[new_matches[0]])
continue
select_multiple = True
while select_multiple:
print 'multiple matches:\n ' + '\n '.join(
'[%2d] %s' % (i, n) for i, n in enumerate(matches))
while True:
line = raw_input('0-%d or q to skip> ' % (len(matches) - 1))
if line == 'q':
select_multiple = False
break
try:
n = int(line)
break
except ValueError:
continue
if not select_multiple: # q
break
if n < 0 or n >= len(matches):
print '%d out of range' % n
continue
text += unichr(name_map[matches[n]])
select_multiple = False
print 'done.'
return text
def _get_char_names(charset):
name_map = {}
if charset:
for cp in charset:
try:
name = unicode_data.name(cp)
except:
name = None
if not name or name == '':
name = '%04x' % cp
else:
name = '%04x %s' % (cp, name.lower())
name_map[name] = cp
return name_map
def _write_text(filename, text):
with codecs.open(filename, 'w', 'utf-8') as f:
f.write(text)
print 'wrote %s' % filename
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-f', '--font',
help='font whose character map to restrict text to',
required=True)
parser.add_argument(
'-t', '--text',
help='initial text, prepend @ to read from file')
args = parser.parse_args()
if args.text:
if args.text[0] == '@':
with codecs.open(args.text[1:], 'r', 'utf-8') as f:
text = f.read()
else:
text = args.text
else:
text = ''
if args.font:
charset = coverage.character_set(args.font)
name_map = _get_char_names(charset)
text = _build_text(name_map, text)
print 'text: ' + text
else:
charset = None
if __name__ == '__main__':
main()
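# --- Hedged CLI sketch (script and font names are hypothetical; flags as
# --- defined in main() above) ---
#     python interactive_sample_text.py -f NotoSans-Regular.ttf -t @seed.txt
# At the '> ' prompt, typing part of a character name searches the font's
# character map by name; 'write' saves the assembled text to a file.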
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
__all__ = ["QuantizedDistribution"]
def _logsum_expbig_minus_expsmall(big, small):
"""Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Numeric `Tensor`
small: Numeric `Tensor` with same `dtype` as `big` and broadcastable shape.
Returns:
`Tensor` of the same `dtype` as `big` and broadcast shape.
"""
with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]):
return math_ops.log(1. - math_ops.exp(small - big)) + big
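# Hedged numeric check of the identity above (illustration only): with
# big = log(4) and small = log(1), log(1 - exp(small - big)) + big
# = log(1 - 0.25) + log(4) = log(3) = log(exp(big) - exp(small)).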
_prob_base_note = """
For whole numbers `y`,
```
P[Y = y] := P[X <= lower_cutoff], if y == lower_cutoff,
:= P[X > upper_cutoff - 1], y == upper_cutoff,
:= 0, if y < lower_cutoff or y > upper_cutoff,
:= P[y - 1 < X <= y], all other y.
```
"""
_prob_note = _prob_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`. If the
base distribution has a `survival_function` method, results will be more
accurate for large values of `y`, and in this case the `survival_function` must
also be defined on `y - 1`.
"""
_log_prob_note = _prob_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`. If the
base distribution has a `log_survival_function` method, results will be more
accurate for large values of `y`, and in this case the `log_survival_function`
must also be defined on `y - 1`.
"""
_cdf_base_note = """
For whole numbers `y`,
```
cdf(y) := P[Y <= y]
= 1, if y >= upper_cutoff,
= 0, if y < lower_cutoff,
= P[X <= y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_cdf_note = _cdf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_cdf_note = _cdf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
_sf_base_note = """
For whole numbers `y`,
```
survival_function(y) := P[Y > y]
= 0, if y >= upper_cutoff,
= 1, if y < lower_cutoff,
= P[X > y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_sf_note = _sf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_sf_note = _sf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
class QuantizedDistribution(distributions.Distribution):
"""Distribution representing the quantization `Y = ceiling(X)`.
#### Definition in terms of sampling.
```
1. Draw X
2. Set Y <-- ceiling(X)
3. If Y < lower_cutoff, reset Y <-- lower_cutoff
4. If Y > upper_cutoff, reset Y <-- upper_cutoff
5. Return Y
```
#### Definition in terms of the probability mass function.
Given scalar random variable `X`, we define a discrete random variable `Y`
supported on the integers as follows:
```
P[Y = j] := P[X <= lower_cutoff], if j == lower_cutoff,
:= P[X > upper_cutoff - 1], j == upper_cutoff,
:= 0, if j < lower_cutoff or j > upper_cutoff,
:= P[j - 1 < X <= j], all other j.
```
Conceptually, without cutoffs, the quantization process partitions the real
line `R` into half open intervals, and identifies an integer `j` with the
right endpoints:
```
R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ...
j = ... -1 0 1 2 3 4 ...
```
`P[Y = j]` is the mass of `X` within the `jth` interval.
If `lower_cutoff = 0`, and `upper_cutoff = 2`, then the intervals are redrawn
and `j` is re-assigned:
```
R = (-infty, 0](0, 1](1, infty)
j = 0 1 2
```
`P[Y = j]` is still the mass of `X` within the `jth` interval.
#### Caveats
Since evaluation of each `P[Y = j]` involves a cdf evaluation (rather than
a closed form function such as for a Poisson), computations such as mean and
entropy are better done with samples or approximations, and are not
implemented by this class.
"""
def __init__(self,
distribution,
lower_cutoff=None,
upper_cutoff=None,
validate_args=False,
name="QuantizedDistribution"):
"""Construct a Quantized Distribution representing `Y = ceiling(X)`.
Some properties are inherited from the distribution defining `X`. Example:
`allow_nan_stats` is determined for this `QuantizedDistribution` by reading
the `distribution`.
Args:
distribution: The base distribution class to transform. Typically an
instance of `Distribution`.
lower_cutoff: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's pdf/pmf should be defined at
`lower_cutoff`.
upper_cutoff: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's pdf/pmf should be defined at
`upper_cutoff - 1`.
`upper_cutoff` must be strictly greater than `lower_cutoff`.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for the distribution.
Raises:
TypeError: If `distribution` is not an instance of
`Distribution`, or if it is not continuous.
NotImplementedError: If the base distribution does not implement `cdf`.
"""
parameters = locals()
parameters.pop("self")
values = (
list(distribution.parameters.values()) +
[lower_cutoff, upper_cutoff])
with ops.name_scope(name, values=values) as ns:
self._dist = distribution
if lower_cutoff is not None:
lower_cutoff = ops.convert_to_tensor(lower_cutoff, name="lower_cutoff")
if upper_cutoff is not None:
upper_cutoff = ops.convert_to_tensor(upper_cutoff, name="upper_cutoff")
contrib_tensor_util.assert_same_float_dtype(
tensors=[self.distribution, lower_cutoff, upper_cutoff])
# We let QuantizedDistribution access _graph_parents since this class is
# more like a base class.
graph_parents = self._dist._graph_parents # pylint: disable=protected-access
checks = []
if lower_cutoff is not None and upper_cutoff is not None:
message = "lower_cutoff must be strictly less than upper_cutoff."
checks.append(
check_ops.assert_less(
lower_cutoff, upper_cutoff, message=message))
self._validate_args = validate_args # self._check_integer uses this.
with ops.control_dependencies(checks if validate_args else []):
if lower_cutoff is not None:
self._lower_cutoff = self._check_integer(lower_cutoff)
graph_parents += [self._lower_cutoff]
else:
self._lower_cutoff = None
if upper_cutoff is not None:
self._upper_cutoff = self._check_integer(upper_cutoff)
graph_parents += [self._upper_cutoff]
else:
self._upper_cutoff = None
super(QuantizedDistribution, self).__init__(
dtype=self._dist.dtype,
is_continuous=False,
is_reparameterized=False,
validate_args=validate_args,
allow_nan_stats=self._dist.allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=ns)
def _batch_shape(self):
return self.distribution.batch_shape()
def _get_batch_shape(self):
return self.distribution.get_batch_shape()
def _event_shape(self):
return self.distribution.event_shape()
def _get_event_shape(self):
return self.distribution.get_event_shape()
def _sample_n(self, n, seed=None):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
with ops.name_scope("transform"):
n = ops.convert_to_tensor(n, name="n")
x_samps = self.distribution.sample_n(n=n, seed=seed)
ones = array_ops.ones_like(x_samps)
# Snap values to the intervals (j - 1, j].
result_so_far = math_ops.ceil(x_samps)
if lower_cutoff is not None:
result_so_far = array_ops.where(result_so_far < lower_cutoff,
lower_cutoff * ones, result_so_far)
if upper_cutoff is not None:
result_so_far = array_ops.where(result_so_far > upper_cutoff,
upper_cutoff * ones, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_prob_note)
def _log_prob(self, y):
if not hasattr(self.distribution, "_log_cdf"):
raise NotImplementedError(
"'log_prob' not implemented unless the base distribution implements "
"'log_cdf'")
y = self._check_integer(y)
try:
return self._log_prob_with_logsf_and_logcdf(y)
except NotImplementedError:
return self._log_prob_with_logcdf(y)
def _log_prob_with_logcdf(self, y):
return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1))
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use where() in a way such that no chosen input is inf;
# this prevents the troublesome case where the output of where() can be
# finite, but the output of grad(where()) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
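# Hedged numeric intuition for the where() above (illustration only): far in
# the right tail, cdf(y) and cdf(y - 1) both round to ~1.0 in floating point,
# so logcdf(y) - logcdf(y - 1) loses all precision, while logsf(y - 1) and
# logsf(y) stay well separated; picking the sf pair there keeps the
# subtraction stable.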
@distribution_util.AppendDocstring(_prob_note)
def _prob(self, y):
if not hasattr(self.distribution, "_cdf"):
raise NotImplementedError(
"'prob' not implemented unless the base distribution implements "
"'cdf'")
y = self._check_integer(y)
try:
return self._prob_with_sf_and_cdf(y)
except NotImplementedError:
return self._prob_with_cdf(y)
def _prob_with_cdf(self, y):
return self.cdf(y) - self.cdf(y - 1)
def _prob_with_sf_and_cdf(self, y):
# There are two options that would be equal if we had infinite precision:
# sf(y - 1) - sf(y)
# cdf(y) - cdf(y - 1)
sf_y = self.survival_function(y)
sf_y_minus_1 = self.survival_function(y - 1)
cdf_y = self.cdf(y)
cdf_y_minus_1 = self.cdf(y - 1)
# sf_prob has greater precision iff we're on the right side of the median.
return array_ops.where(
sf_y < cdf_y, # True iff we're on the right side of the median.
sf_y_minus_1 - sf_y,
cdf_y - cdf_y_minus_1)
@distribution_util.AppendDocstring(_log_cdf_note)
def _log_cdf(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= upper_cutoff,
# = 0, if y < lower_cutoff,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
result_so_far = self.distribution.log_cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j < lower_cutoff, neg_inf, result_so_far)
if upper_cutoff is not None:
result_so_far = array_ops.where(j >= upper_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_cdf_note)
def _cdf(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= upper_cutoff,
# = 0, if y < lower_cutoff,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
# P[X <= j], used when lower_cutoff < X < upper_cutoff.
result_so_far = self.distribution.cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
result_so_far = array_ops.where(j < lower_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
if upper_cutoff is not None:
result_so_far = array_ops.where(j >= upper_cutoff,
array_ops.ones_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_sf_note)
def _log_survival_function(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= upper_cutoff,
# = 1, if y < lower_cutoff,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when lower_cutoff < X < upper_cutoff.
result_so_far = self.distribution.log_survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
result_so_far = array_ops.where(j < lower_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
if upper_cutoff is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j >= upper_cutoff, neg_inf, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_sf_note)
def _survival_function(self, y):
lower_cutoff = self._lower_cutoff
upper_cutoff = self._upper_cutoff
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= upper_cutoff,
# = 1, if y < lower_cutoff,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when lower_cutoff < X < upper_cutoff.
result_so_far = self.distribution.survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if lower_cutoff is not None:
result_so_far = array_ops.where(j < lower_cutoff,
array_ops.ones_like(result_so_far),
result_so_far)
if upper_cutoff is not None:
result_so_far = array_ops.where(j >= upper_cutoff,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
def _check_integer(self, value):
with ops.name_scope("check_integer", values=[value]):
value = ops.convert_to_tensor(value, name="value")
if not self.validate_args:
return value
dependencies = [distribution_util.assert_integer_form(
value, message="value has non-integer components.")]
return control_flow_ops.with_dependencies(dependencies, value)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._dist
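# --- Hedged usage sketch (not part of the original module; Normal constructor
# --- arguments assumed from the same tf.contrib.distributions era) ---
#
#     from tensorflow.contrib.distributions.python.ops import normal
#     qdist = QuantizedDistribution(
#         distribution=normal.Normal(mu=0., sigma=1.),
#         lower_cutoff=-3., upper_cutoff=3.)
#     samples = qdist.sample_n(n=10)   # integer-valued, clipped to [-3, 3]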
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.www.plugin import Application
from buildbot.schedulers.forcesched import TextParameter
class CodeParameter(TextParameter):
"""A code editor using ace"""
spec_attributes = ["mode", "height"]
type = "code"
mode = "text"
height = 200
# create the interface for the setuptools entry point
ep = Application(__name__, "Buildbot forcescheduler parameter using ace.js to submit code")
from AntColony import AntColony
from AntGraph import AntGraph
from TspPainter import tspPainter
import logging
logger = logging.getLogger("logger")
class VRPCenter:
def __init__(self, tspparser):
self.build_graph(tspparser)
def build_graph(self, tspparser):
self.antGraph = AntGraph(tspparser.cities_coord)
self.lockers = tspparser.lockers
self.lockers_dict = {}
self.delivers_dict = {}
for locker in self.lockers:
self.lockers_dict[locker.id] = locker
self.delivers = tspparser.delivers
for deliver in self.delivers:
self.delivers_dict[deliver.id] = deliver
self.demands = tspparser.demands
self.build_nearest_locker()
def build_nearest_locker(self):
for deliver in self.delivers:
deliver.locker_id = deliver.nearest_locker(self.lockers, self.antGraph.nodes_mat)
locker = self.lockers_dict[deliver.locker_id]
locker.delivers.append(deliver.id)
def start(self):
antColony = AntColony(self.antGraph, self.lockers, self.lockers_dict, self.delivers, self.delivers_dict, self.demands, 10, 250)
antColony.start()
best_path_routes = antColony.best_path_routes
best_path_cost = antColony.best_path_cost
logger.info("-------------------------------------------")
logger.info("Problem optimization result")
logger.info("-------------------------------------------")
if best_path_routes is not None:
logger.info("Best path routes found is")
for key in best_path_routes.keys():
logger.info("Deliver {} {}".format(key, best_path_routes[key]))
logger.info("Locker scheme is")
for locker in self.lockers:
logger.info("Locker {} scheme: {}".format(locker.id, self.locker_scheme(locker, best_path_routes)))
logger.info("cost : {}".format(best_path_cost))
tspPainter.drawRoutes(best_path_routes)
else:
logger.info("Failed to path routes")
input("Press Enter to quit...")
def locker_scheme(self, locker, path_routes):
capacity = 0
for deliver_id in locker.delivers:
if deliver_id in path_routes.keys():
path = path_routes[deliver_id]
for pack in path:
capacity += pack.capacity
capacity += self.demands[locker.pos]
return capacity
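# --- Hedged usage sketch (tspparser is assumed to be built elsewhere) ---
#     center = VRPCenter(tspparser)
#     center.start()   # runs the ant colony and logs the best routes found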
# -*- encoding: utf-8 -*-
# This is a package that contains a number of modules that are used to
# test import from the source files that have different encodings.
# This file (the __init__ module of the package), is encoded in utf-8
# and contains a list of strings from various unicode planes that are
# encoded differently to compare them to the same strings encoded
# differently in submodules. The following list, test_strings,
# contains a list of tuples. The first element of each tuple is the
# suffix that should be prepended with 'module_' to arrive at the
# encoded submodule name, the second item is the encoding and the last
# is the test string. The same string is assigned to the variable
# named 'test' inside the submodule. If the decoding of modules works
# correctly, from module_xyz import test should result in the same
# string as listed below in the 'xyz' entry.
# module, encoding, test string
test_strings = (
('iso_8859_1', 'iso-8859-1', "Les hommes ont oublié cette vérité, "
"dit le renard. Mais tu ne dois pas l'oublier. Tu deviens "
"responsable pour toujours de ce que tu as apprivoisé."),
('koi8_r', 'koi8-r', "Познание бесконечности требует бесконечного времени.")
)
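
# A minimal sketch of how a test might consume test_strings, assuming the
# encoded submodules (module_iso_8859_1, module_koi8_r, ...) live alongside
# this package; the helper below is illustrative and not part of the package.
def _check_test_strings(package_name=__name__):
    import importlib
    for suffix, encoding, expected in test_strings:
        module = importlib.import_module(package_name + '.module_' + suffix)
        # Each submodule assigns the same string (in its own encoding) to 'test'.
        assert module.test == expected, (suffix, encoding)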
import os, Pyro4, new
from django.core import exceptions
import django.htoken.serializer
from django.utils.importlib import import_module
from django.analysis.persisted import mw_socket
from django.http import get_changeset
PYRO_NAME = 'middleware'
def pre_req(self, request, delta):
request = self.cereal.deserialize(request)
self.cereal.apply_req_delta(request, delta)
request.reset_changeset()
return request
def spawn_middleware_server(mw_path):
pid = os.fork()
if pid == 0:
start_daemon(mw_path)
import sys
sys.exit(0)
else:
return pid
def get_middleware_methods(self):
names = ('process_request', 'process_view', 'process_template_response', 'process_response',
'process_exception')
return [ name for name in names if hasattr(self, name) ]
from traceback import format_exc
def proxied_response(self, request, response, delta):
try:
request = pre_req(self, request, delta)
response = self._process_response(request, response)
return response, request.get_changeset()
except:
print format_exc()
def proxied_template_response(self, request, response, delta):
try:
request = pre_req(self, request, delta)
response = self._process_template_response(request, response)
return response, request.get_changeset()
except:
print format_exc()
def proxied_request(self, request, delta):
try:
request = pre_req(self, request, delta)
response = self._process_request(request)
return response, request.get_changeset()
except:
print format_exc()
def proxied_view(self, request, callback_dummy, callback_args, callback_kwargs, delta):
try:
request = pre_req(self, request, delta)
response = self._process_view(request, callback_dummy, callback_args, callback_kwargs)
return response, request.get_changeset()
except:
print format_exc()
def proxied_exception(self, request, e, delta):
try:
request = pre_req(self, request, delta)
response = self._process_exception(request, e)
return response, request.get_changeset()
except:
print format_exc()
def start_daemon(middleware_path):
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class'
% (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
return
mw_instance.get_middleware_methods = new.instancemethod(get_middleware_methods,
mw_instance, mw_instance.__class__) # fuh!
names = mw_instance.get_middleware_methods()
if 'process_response' in names:
mw_instance._process_response = mw_instance.process_response
mw_instance.process_response = new.instancemethod(proxied_response,
mw_instance,
mw_instance.__class__)
if 'process_exception' in names:
mw_instance._process_exception = mw_instance.process_exception
mw_instance.process_exception = new.instancemethod(proxied_exception,
mw_instance,
mw_instance.__class__)
if 'process_template_response' in names:
mw_instance._process_template_response = mw_instance.process_template_response
mw_instance.process_template_response = new.instancemethod(proxied_template_response,
mw_instance,
mw_instance.__class__)
if 'process_view' in names:
mw_instance._process_view = mw_instance.process_view
mw_instance.process_view = new.instancemethod(proxied_view,
mw_instance,
mw_instance.__class__)
if 'process_request' in names:
mw_instance._process_request = mw_instance.process_request
mw_instance.process_request = new.instancemethod(proxied_request,
mw_instance,
mw_instance.__class__)
daemon = False
try:
local = mw_instance
daemon = Pyro4.Daemon(unixsocket=mw_socket(middleware_path))
daemon.serializer = django.htoken.serializer.Serializer()
local.cereal = daemon.serializer
daemon.register(local, PYRO_NAME)
daemon.requestLoop()
finally:
if daemon:
daemon.close()
#!/usr/bin/env python
import os
import subprocess
import sys
KNOWN_LIVE_SUITES = [
'client',
'glance',
'identity',
'nova',
'swift',
]
def ensure_tarmac_log_dir():
"""Hack-around tarmac not creating its own log directory."""
try:
os.makedirs(os.path.expanduser("~/logs/"))
except OSError:
        # It may already exist, or we may not be able to create it; either way, just continue
pass
def create_tarmac_repository():
"""Try to ensure a shared repository for the code."""
try:
from bzrlib import (
branch,
controldir,
errors,
transport,
repository,
reconfigure,
)
except ImportError:
sys.stderr.write('Could not import bzrlib to ensure a repository\n')
return
try:
b, _ = branch.Branch.open_containing('.')
except:
sys.stderr.write('Could not open local branch\n')
return
# By the time we get here, we've already branched everything from
# launchpad. So if we aren't in a shared repository, we create one, and
# fetch all the data into it, so it doesn't have to be fetched again.
if b.repository.is_shared():
return
pwd = os.getcwd()
expected_dir = 'src/launchpad.net/'
offset = pwd.rfind(expected_dir)
if offset == -1:
        sys.stderr.write('Could not find %r to create a shared repo\n' % (expected_dir,))
return
path = pwd[:offset+len(expected_dir)]
try:
repository.Repository.open(path)
except (errors.NoRepositoryPresent, errors.NotBranchError):
pass # Good, the repo didn't exist
else:
# We must have already created the repo.
return
repo_fmt = controldir.format_registry.make_bzrdir('default')
trans = transport.get_transport(path)
info = repo_fmt.initialize_on_transport_ex(trans, create_prefix=False,
make_working_trees=True, shared_repo=True, force_new_repo=True,
use_existing_dir=True,
repo_format_name=repo_fmt.repository_format.get_format_string())
repo = info[0]
sys.stderr.write('Reconfiguring to use a shared repository\n')
reconfiguration = reconfigure.Reconfigure.to_use_shared(b.bzrdir)
try:
reconfiguration.apply(False)
except errors.NoRepositoryPresent:
sys.stderr.write('tarmac did a lightweight checkout,'
' not fetching into the repo.\n')
def ensure_juju_core_dependencies():
"""Ensure that juju-core and all dependencies have been installed."""
# Note: This potentially overwrites goose while it is updating the world.
    # However, if we are targeting the trunk branch of goose, that should have
# already been updated to the latest version by tarmac.
# I don't quite see a way to reconcile that we want the latest juju-core
# and all of the other dependencies, but we don't want to touch goose
# itself. One option would be to have a split GOPATH. One installs the
# latest juju-core and everything else. The other is where the
# goose-under-test resides. So we don't add the goose-under-test to GOPATH,
# call "go get", then add it to the GOPATH for the rest of the testing.
cmd = ['go', 'get', '-u', '-x', 'launchpad.net/juju-core/...']
sys.stderr.write('Running: %s\n' % (' '.join(cmd),))
retcode = subprocess.call(cmd)
if retcode != 0:
sys.stderr.write('WARN: Failed to update launchpad.net/juju-core\n')
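
# The split-GOPATH approach described in the comment above is not implemented
# here; a rough, illustrative sketch (with a hypothetical deps_dir) could fetch
# dependencies into a separate tree and keep the goose-under-test checkout
# untouched, prepending it only for the actual test run.
def split_gopath_fetch(deps_dir, goose_dir):
    """Illustrative only: 'go get' deps into deps_dir, leave goose_dir pristine."""
    os.environ['GOPATH'] = deps_dir
    run_cmd(['go', 'get', '-u', '-x', 'launchpad.net/juju-core/...'])
    # For the test run, put the goose checkout first so it shadows any copy
    # that 'go get' may have fetched into deps_dir.
    os.environ['GOPATH'] = goose_dir + os.pathsep + deps_dir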
def tarmac_setup(opts):
"""Do all the bits of setup that need to happen for the tarmac bot."""
ensure_tarmac_log_dir()
create_tarmac_repository()
def setup_gopath():
pwd = os.getcwd()
if sys.platform == 'win32':
pwd = pwd.replace('\\', '/')
offset = pwd.rfind('src/gopkg.in/goose.v1')
if offset == -1:
sys.stderr.write('Could not find "src/gopkg.in/goose.v1" in cwd: %s\n'
% (pwd,))
sys.stderr.write('Unable to automatically set GOPATH\n')
return
add_gopath = pwd[:offset].rstrip('/')
gopath = os.environ.get("GOPATH")
if gopath:
if add_gopath in gopath:
return
# Put this path first, so we know we are running these tests
gopath = add_gopath + os.pathsep + gopath
else:
gopath = add_gopath
sys.stderr.write('Setting GOPATH to: %s\n' % (gopath,))
os.environ['GOPATH'] = gopath
def run_cmd(cmd):
cmd_str = ' '.join(cmd)
sys.stderr.write('Running: %s\n' % (cmd_str,))
retcode = subprocess.call(cmd)
if retcode != 0:
sys.stderr.write('FAIL: failed running: %s\n' % (cmd_str,))
return retcode
def run_go_fmt(opts):
return run_cmd(['go', 'fmt', './...'])
def run_go_build(opts):
return run_cmd(['go', 'build', './...'])
def run_go_test(opts):
# Note: I wish we could run this with '-check.v'
return run_cmd(['go', 'test', './...'])
def run_juju_core_tests(opts):
"""Run the juju-core test suite"""
orig_wd = os.getcwd()
try:
sys.stderr.write('Switching to juju-core\n')
os.chdir('../juju-core')
retval = run_cmd(['go', 'build', './...'])
if retval != 0:
return retval
return run_cmd(['go', 'test', './...'])
finally:
os.chdir(orig_wd)
def run_live_tests(opts):
"""Run all of the live tests."""
orig_wd = os.getcwd()
final_retcode = 0
for d in KNOWN_LIVE_SUITES:
try:
cmd = ['go', 'test', '-live', '-check.v']
sys.stderr.write('Running: %s in %s\n' % (' '.join(cmd), d))
os.chdir(d)
retcode = subprocess.call(cmd)
if retcode != 0:
sys.stderr.write('FAIL: Running live tests in %s\n' % (d,))
final_retcode = retcode
finally:
os.chdir(orig_wd)
return final_retcode
def main(args):
import argparse
p = argparse.ArgumentParser(description='Run the goose test suite')
p.add_argument('--verbose', action='store_true', help='Be chatty')
p.add_argument('--version', action='version', version='%(prog)s 0.1')
p.add_argument('--tarmac', action='store_true',
help="Pass this if the script is running as the tarmac bot."
" This is used for stuff like ensuring repositories and"
" logging directories are initialized.")
p.add_argument('--juju-core', action='store_true',
help="Run the juju-core trunk tests as well as the goose tests.")
p.add_argument('--live', action='store_true',
help="Run tests against a live service.")
opts = p.parse_args(args)
setup_gopath()
if opts.tarmac:
tarmac_setup(opts)
to_run = [run_go_fmt, run_go_build, run_go_test]
if opts.juju_core:
ensure_juju_core_dependencies()
to_run.append(run_juju_core_tests)
if opts.live:
to_run.append(run_live_tests)
for func in to_run:
retcode = func(opts)
if retcode != 0:
return retcode
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv[1:]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from __future__ import absolute_import
import json
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.encoding import force_unicode
from django.utils.functional import Promise
from ..markup import Markup
class PootleJSONEncoder(DjangoJSONEncoder):
"""Custom JSON encoder for Pootle.
This is mostly implemented to avoid calling `force_unicode` all the time on
certain types of objects.
https://docs.djangoproject.com/en/1.4/topics/serialization/#id2
"""
def default(self, obj):
if isinstance(obj, Promise) or isinstance(obj, Markup):
return force_unicode(obj)
return super(PootleJSONEncoder, self).default(obj)
def jsonify(obj):
"""Serialize Python `obj` object into a JSON string."""
if settings.DEBUG:
indent = 4
else:
indent = None
return json.dumps(obj, indent=indent, cls=PootleJSONEncoder)
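
# A small usage sketch (hypothetical data): lazy translation strings and Markup
# values pass through PootleJSONEncoder without an explicit force_unicode call
# at each call site.
#
#     from django.utils.translation import ugettext_lazy as _
#     payload = jsonify({'msg': _('Saved'), 'count': 3})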
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
unescapeHTML,
)
class UstudioIE(InfoExtractor):
IE_NAME = 'ustudio'
    _VALID_URL = r'https?://(?:(?:www|v1)\.)?ustudio\.com/video/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)'
_TEST = {
'url': 'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge',
'md5': '58bbfca62125378742df01fc2abbdef6',
'info_dict': {
'id': 'Uxu2my9bgSph',
'display_id': 'san_francisco_golden_gate_bridge',
'ext': 'mp4',
'title': 'San Francisco: Golden Gate Bridge',
'description': 'md5:23925500697f2c6d4830e387ba51a9be',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20111107',
'uploader': 'Tony Farley',
}
}
def _real_extract(self, url):
video_id, display_id = re.match(self._VALID_URL, url).groups()
config = self._download_xml(
'http://v1.ustudio.com/embed/%s/ustudio/config.xml' % video_id,
display_id)
def extract(kind):
return [{
'url': unescapeHTML(item.attrib['url']),
'width': int_or_none(item.get('width')),
'height': int_or_none(item.get('height')),
} for item in config.findall('./qualities/quality/%s' % kind) if item.get('url')]
formats = extract('video')
self._sort_formats(formats)
webpage = self._download_webpage(url, display_id)
title = self._og_search_title(webpage)
upload_date = unified_strdate(self._search_regex(
r'(?s)Uploaded by\s*.+?\s*on\s*([^<]+)',
webpage, 'upload date', fatal=False))
uploader = self._search_regex(
            r'Uploaded by\s*<a[^>]*>([^<]+)<',
webpage, 'uploader', fatal=False)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnails': extract('image'),
'upload_date': upload_date,
'uploader': uploader,
'formats': formats,
}
class UstudioEmbedIE(InfoExtractor):
IE_NAME = 'ustudio:embed'
    _VALID_URL = r'https?://(?:(?:app|embed)\.)?ustudio\.com/embed/(?P<uid>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://app.ustudio.com/embed/DeN7VdYRDKhP/Uw7G1kMCe65T',
'md5': '47c0be52a09b23a7f40de9469cec58f4',
'info_dict': {
'id': 'Uw7G1kMCe65T',
'ext': 'mp4',
'title': '5 Things IT Should Know About Video',
'description': 'md5:93d32650884b500115e158c5677d25ad',
'uploader_id': 'DeN7VdYRDKhP',
}
}
def _real_extract(self, url):
uploader_id, video_id = re.match(self._VALID_URL, url).groups()
video_data = self._download_json(
'http://app.ustudio.com/embed/%s/%s/config.json' % (uploader_id, video_id),
video_id)['videos'][0]
title = video_data['name']
formats = []
for ext, qualities in video_data.get('transcodes', {}).items():
for quality in qualities:
quality_url = quality.get('url')
if not quality_url:
continue
height = int_or_none(quality.get('height'))
formats.append({
'format_id': '%s-%dp' % (ext, height) if height else ext,
'url': quality_url,
'width': int_or_none(quality.get('width')),
'height': height,
})
self._sort_formats(formats)
thumbnails = []
for image in video_data.get('images', []):
image_url = image.get('url')
if not image_url:
continue
thumbnails.append({
'url': image_url,
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'uploader_id': uploader_id,
'tags': video_data.get('keywords'),
'thumbnails': thumbnails,
'formats': formats,
}
# encoding: utf-8
import paste.fixture
from ckan.common import config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
@classmethod
def setup_class(cls):
cls.config_templates = config['ckan.legacy_templates']
config['ckan.legacy_templates'] = 'false'
wsgiapp = middleware.make_app(config['global_conf'], **config)
p.load(cls.view_type)
cls.app = paste.fixture.TestApp(wsgiapp)
cls.p = cls.view_class()
create_test_data.CreateTestData.create()
cls.resource_view, cls.package, cls.resource_id = \
_create_test_view(cls.view_type)
@classmethod
def teardown_class(cls):
config['ckan.legacy_templates'] = cls.config_templates
p.unload(cls.view_type)
model.repo.rebuild_db()
def test_can_view(self):
data_dict = {'resource': {'datastore_active': True}}
assert self.p.can_view(data_dict)
data_dict = {'resource': {'datastore_active': False}}
assert not self.p.can_view(data_dict)
def test_title_description_iframe_shown(self):
url = h.url_for(controller='package', action='resource_read',
id=self.package.name, resource_id=self.resource_id)
result = self.app.get(url)
assert self.resource_view['title'] in result
assert self.resource_view['description'] in result
assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
view_type = 'recline_view'
view_class = plugin.ReclineView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
def test_can_view_format_no_datastore(self):
'''
Test can_view with acceptable formats when datastore_active is False
(DataProxy in use).
'''
formats = ['CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert self.p.can_view(data_dict)
def test_can_view_bad_format_no_datastore(self):
'''
Test can_view with incorrect formats when datastore_active is False.
'''
formats = ['TXT', 'txt', 'doc', 'JSON']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert not self.p.can_view(data_dict)
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('recline_view'):
p.load('recline_view')
if not p.plugin_loaded('datastore'):
p.load('datastore')
app_config = config.copy()
app_config['ckan.legacy_templates'] = 'false'
app_config['ckan.plugins'] = 'recline_view datastore'
app_config['ckan.views.default_views'] = 'recline_view'
wsgiapp = middleware.make_app(config['global_conf'], **app_config)
cls.app = paste.fixture.TestApp(wsgiapp)
@classmethod
def teardown_class(cls):
if p.plugin_loaded('recline_view'):
p.unload('recline_view')
if p.plugin_loaded('datastore'):
p.unload('datastore')
def test_create_datastore_only_view(self):
dataset = factories.Dataset()
data = {
'resource': {'package_id': dataset['id']},
'fields': [{'id': 'a'}, {'id': 'b'}],
'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
}
result = helpers.call_action('datastore_create', **data)
resource_id = result['resource_id']
url = h.url_for(controller='package', action='resource_read',
id=dataset['id'], resource_id=resource_id)
result = self.app.get(url)
assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
view_type = 'recline_grid_view'
view_class = plugin.ReclineGridView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
class TestReclineGraphView(BaseTestReclineViewBase):
view_type = 'recline_graph_view'
view_class = plugin.ReclineGraphView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'graph_type', 'group', 'series']
_assert_schema_exists_and_has_keys(schema, expected_keys)
class TestReclineMapView(BaseTestReclineViewBase):
view_type = 'recline_map_view'
view_class = plugin.ReclineMapView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'map_field_type',
'latitude_field', 'longitude_field', 'geojson_field',
'auto_zoom', 'cluster_markers']
_assert_schema_exists_and_has_keys(schema, expected_keys)
def _create_test_view(view_type):
context = {'model': model,
'session': model.Session,
'user': model.User.get('testsysadmin').name}
package = model.Package.get('annakarenina')
resource_id = package.resources[1].id
resource_view = {'resource_id': resource_id,
'view_type': view_type,
'title': u'Test View',
'description': u'A nice test view'}
resource_view = p.toolkit.get_action('resource_view_create')(
context, resource_view)
return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
    keys = sorted(schema.keys())
    expected_keys = sorted(expected_keys)
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
"""Tests for 1-Wire devices connected on OWServer."""
from pyownet.protocol import Error as ProtocolError
import pytest
from homeassistant.components.onewire.const import (
DEFAULT_OWSERVER_PORT,
DOMAIN,
PRESSURE_CBAR,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
ELECTRICAL_CURRENT_AMPERE,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
TEMP_CELSIUS,
VOLT,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import mock_device_registry, mock_registry
MOCK_CONFIG = {
SENSOR_DOMAIN: {
"platform": DOMAIN,
"host": "localhost",
"port": DEFAULT_OWSERVER_PORT,
"names": {
"10.111111111111": "My DS18B20",
},
}
}
MOCK_DEVICE_SENSORS = {
"00.111111111111": {
"inject_reads": [
b"", # read device type
],
"sensors": [],
},
"10.111111111111": {
"inject_reads": [
b"DS18S20", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "10.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS18S20",
"name": "10.111111111111",
},
"sensors": [
{
"entity_id": "sensor.my_ds18b20_temperature",
"unique_id": "/10.111111111111/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"12.111111111111": {
"inject_reads": [
b"DS2406", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "12.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS2406",
"name": "12.111111111111",
},
"sensors": [
{
"entity_id": "sensor.12_111111111111_temperature",
"unique_id": "/12.111111111111/TAI8570/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
"disabled": True,
},
{
"entity_id": "sensor.12_111111111111_pressure",
"unique_id": "/12.111111111111/TAI8570/pressure",
"injected_value": b" 1025.123",
"result": "1025.1",
"unit": PRESSURE_MBAR,
"class": DEVICE_CLASS_PRESSURE,
"disabled": True,
},
],
},
"1D.111111111111": {
"inject_reads": [
b"DS2423", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "1D.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS2423",
"name": "1D.111111111111",
},
"sensors": [
{
"entity_id": "sensor.1d_111111111111_counter_a",
"unique_id": "/1D.111111111111/counter.A",
"injected_value": b" 251123",
"result": "251123",
"unit": "count",
"class": None,
},
{
"entity_id": "sensor.1d_111111111111_counter_b",
"unique_id": "/1D.111111111111/counter.B",
"injected_value": b" 248125",
"result": "248125",
"unit": "count",
"class": None,
},
],
},
"22.111111111111": {
"inject_reads": [
b"DS1822", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "22.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS1822",
"name": "22.111111111111",
},
"sensors": [
{
"entity_id": "sensor.22_111111111111_temperature",
"unique_id": "/22.111111111111/temperature",
"injected_value": ProtocolError,
"result": "unknown",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"26.111111111111": {
"inject_reads": [
b"DS2438", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "26.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS2438",
"name": "26.111111111111",
},
"sensors": [
{
"entity_id": "sensor.26_111111111111_temperature",
"unique_id": "/26.111111111111/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
{
"entity_id": "sensor.26_111111111111_humidity",
"unique_id": "/26.111111111111/humidity",
"injected_value": b" 72.7563",
"result": "72.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_humidity_hih3600",
"unique_id": "/26.111111111111/HIH3600/humidity",
"injected_value": b" 73.7563",
"result": "73.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_humidity_hih4000",
"unique_id": "/26.111111111111/HIH4000/humidity",
"injected_value": b" 74.7563",
"result": "74.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_humidity_hih5030",
"unique_id": "/26.111111111111/HIH5030/humidity",
"injected_value": b" 75.7563",
"result": "75.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_humidity_htm1735",
"unique_id": "/26.111111111111/HTM1735/humidity",
"injected_value": ProtocolError,
"result": "unknown",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_pressure",
"unique_id": "/26.111111111111/B1-R1-A/pressure",
"injected_value": b" 969.265",
"result": "969.3",
"unit": PRESSURE_MBAR,
"class": DEVICE_CLASS_PRESSURE,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_illuminance",
"unique_id": "/26.111111111111/S3-R1-A/illuminance",
"injected_value": b" 65.8839",
"result": "65.9",
"unit": LIGHT_LUX,
"class": DEVICE_CLASS_ILLUMINANCE,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_voltage_vad",
"unique_id": "/26.111111111111/VAD",
"injected_value": b" 2.97",
"result": "3.0",
"unit": VOLT,
"class": DEVICE_CLASS_VOLTAGE,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_voltage_vdd",
"unique_id": "/26.111111111111/VDD",
"injected_value": b" 4.74",
"result": "4.7",
"unit": VOLT,
"class": DEVICE_CLASS_VOLTAGE,
"disabled": True,
},
{
"entity_id": "sensor.26_111111111111_current",
"unique_id": "/26.111111111111/IAD",
"injected_value": b" 1",
"result": "1.0",
"unit": ELECTRICAL_CURRENT_AMPERE,
"class": DEVICE_CLASS_CURRENT,
"disabled": True,
},
],
},
"28.111111111111": {
"inject_reads": [
b"DS18B20", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "28.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS18B20",
"name": "28.111111111111",
},
"sensors": [
{
"entity_id": "sensor.28_111111111111_temperature",
"unique_id": "/28.111111111111/temperature",
"injected_value": b" 26.984",
"result": "27.0",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"3B.111111111111": {
"inject_reads": [
b"DS1825", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "3B.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS1825",
"name": "3B.111111111111",
},
"sensors": [
{
"entity_id": "sensor.3b_111111111111_temperature",
"unique_id": "/3B.111111111111/temperature",
"injected_value": b" 28.243",
"result": "28.2",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"42.111111111111": {
"inject_reads": [
b"DS28EA00", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "42.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS28EA00",
"name": "42.111111111111",
},
"sensors": [
{
"entity_id": "sensor.42_111111111111_temperature",
"unique_id": "/42.111111111111/temperature",
"injected_value": b" 29.123",
"result": "29.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"EF.111111111111": {
"inject_reads": [
b"HobbyBoards_EF", # read type
],
"device_info": {
"identifiers": {(DOMAIN, "EF.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "HobbyBoards_EF",
"name": "EF.111111111111",
},
"sensors": [
{
"entity_id": "sensor.ef_111111111111_humidity",
"unique_id": "/EF.111111111111/humidity/humidity_corrected",
"injected_value": b" 67.745",
"result": "67.7",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111111_humidity_raw",
"unique_id": "/EF.111111111111/humidity/humidity_raw",
"injected_value": b" 65.541",
"result": "65.5",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111111_temperature",
"unique_id": "/EF.111111111111/humidity/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"EF.111111111112": {
"inject_reads": [
b"HB_MOISTURE_METER", # read type
b" 1", # read is_leaf_0
b" 1", # read is_leaf_1
b" 0", # read is_leaf_2
b" 0", # read is_leaf_3
],
"device_info": {
"identifiers": {(DOMAIN, "EF.111111111112")},
"manufacturer": "Maxim Integrated",
"model": "HB_MOISTURE_METER",
"name": "EF.111111111112",
},
"sensors": [
{
"entity_id": "sensor.ef_111111111112_wetness_0",
"unique_id": "/EF.111111111112/moisture/sensor.0",
"injected_value": b" 41.745",
"result": "41.7",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111112_wetness_1",
"unique_id": "/EF.111111111112/moisture/sensor.1",
"injected_value": b" 42.541",
"result": "42.5",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111112_moisture_2",
"unique_id": "/EF.111111111112/moisture/sensor.2",
"injected_value": b" 43.123",
"result": "43.1",
"unit": PRESSURE_CBAR,
"class": DEVICE_CLASS_PRESSURE,
},
{
"entity_id": "sensor.ef_111111111112_moisture_3",
"unique_id": "/EF.111111111112/moisture/sensor.3",
"injected_value": b" 44.123",
"result": "44.1",
"unit": PRESSURE_CBAR,
"class": DEVICE_CLASS_PRESSURE,
},
],
},
}
@pytest.mark.parametrize("device_id", MOCK_DEVICE_SENSORS.keys())
async def test_owserver_setup_valid_device(hass, device_id):
"""Test for 1-Wire device.
As they would be on a clean setup: all binary-sensors and switches disabled.
"""
await async_setup_component(hass, "persistent_notification", {})
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
mock_device_sensor = MOCK_DEVICE_SENSORS[device_id]
dir_return_value = [f"/{device_id}/"]
read_side_effect = [device_id[0:2].encode()]
if "inject_reads" in mock_device_sensor:
read_side_effect += mock_device_sensor["inject_reads"]
expected_sensors = mock_device_sensor["sensors"]
for expected_sensor in expected_sensors:
read_side_effect.append(expected_sensor["injected_value"])
# Ensure enough read side effect
read_side_effect.extend([ProtocolError("Missing injected value")] * 10)
with patch("homeassistant.components.onewire.onewirehub.protocol.proxy") as owproxy:
owproxy.return_value.dir.return_value = dir_return_value
owproxy.return_value.read.side_effect = read_side_effect
assert await async_setup_component(hass, SENSOR_DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_sensors)
if len(expected_sensors) > 0:
device_info = mock_device_sensor["device_info"]
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device({(DOMAIN, device_id)}, set())
assert registry_entry is not None
assert registry_entry.identifiers == {(DOMAIN, device_id)}
assert registry_entry.manufacturer == device_info["manufacturer"]
assert registry_entry.name == device_info["name"]
assert registry_entry.model == device_info["model"]
for expected_sensor in expected_sensors:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
assert registry_entry.unit_of_measurement == expected_sensor["unit"]
assert registry_entry.device_class == expected_sensor["class"]
assert registry_entry.disabled == expected_sensor.get("disabled", False)
state = hass.states.get(entity_id)
if registry_entry.disabled:
assert state is None
else:
assert state.state == expected_sensor["result"]
'''
====================================================
Applying the Kalman Filter with Missing Observations
====================================================
This example shows how one may apply :class:`KalmanFilter` when some
measurements are missing.
While the Kalman Filter and Kalman Smoother are typically presented assuming a
measurement exists for every time step, this is not always the case in reality.
:class:`KalmanFilter` is implemented to recognize masked portions of numpy
arrays as missing measurements.
The figure drawn illustrates the trajectory of each dimension of the true
state, the estimated state using all measurements, and the estimated state
using every fifth measurement.
'''
import numpy as np
import pylab as pl
from pykalman import KalmanFilter
# specify parameters
random_state = np.random.RandomState(0)
transition_matrix = [[1, 0.1], [0, 1]]
transition_offset = [-0.1, 0.1]
observation_matrix = np.eye(2) + random_state.randn(2, 2) * 0.1
observation_offset = [1.0, -1.0]
initial_state_mean = [5, -5]
n_timesteps = 50
# sample from model
kf = KalmanFilter(
transition_matrices=transition_matrix,
observation_matrices=observation_matrix,
transition_offsets=transition_offset,
observation_offsets=observation_offset,
initial_state_mean=initial_state_mean,
random_state=0
)
states, observations_all = kf.sample(
n_timesteps, initial_state=initial_state_mean
)
# label half of the observations as missing
observations_missing = np.ma.array(
observations_all,
mask=np.zeros(observations_all.shape)
)
for t in range(n_timesteps):
if t % 5 != 0:
observations_missing[t] = np.ma.masked
# estimate state with filtering and smoothing
smoothed_states_all = kf.smooth(observations_all)[0]
smoothed_states_missing = kf.smooth(observations_missing)[0]
# draw estimates
pl.figure()
lines_true = pl.plot(states, color='b')
lines_smooth_all = pl.plot(smoothed_states_all, color='r')
lines_smooth_missing = pl.plot(smoothed_states_missing, color='g')
pl.legend(
(lines_true[0], lines_smooth_all[0], lines_smooth_missing[0]),
('true', 'all', 'missing'),
loc='lower right'
)
pl.show()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Classes for interacting with Kubernetes API"""
from kubernetes.client import models as k8s
class Resources:
"""backwards compat for Resources"""
__slots__ = (
'request_memory',
'request_cpu',
'limit_memory',
'limit_cpu',
'limit_gpu',
'request_ephemeral_storage',
'limit_ephemeral_storage',
)
"""
:param request_memory: requested memory
:type request_memory: str
:param request_cpu: requested CPU number
:type request_cpu: float | str
:param request_ephemeral_storage: requested ephemeral storage
:type request_ephemeral_storage: str
:param limit_memory: limit for memory usage
:type limit_memory: str
:param limit_cpu: Limit for CPU used
:type limit_cpu: float | str
:param limit_gpu: Limits for GPU used
:type limit_gpu: int
:param limit_ephemeral_storage: Limit for ephemeral storage
:type limit_ephemeral_storage: float | str
"""
def __init__(
self,
request_memory=None,
request_cpu=None,
request_ephemeral_storage=None,
limit_memory=None,
limit_cpu=None,
limit_gpu=None,
limit_ephemeral_storage=None,
):
self.request_memory = request_memory
self.request_cpu = request_cpu
self.request_ephemeral_storage = request_ephemeral_storage
self.limit_memory = limit_memory
self.limit_cpu = limit_cpu
self.limit_gpu = limit_gpu
self.limit_ephemeral_storage = limit_ephemeral_storage
def to_k8s_client_obj(self):
"""
Converts to k8s object.
        :rtype: object
"""
limits_raw = {
'cpu': self.limit_cpu,
'memory': self.limit_memory,
'nvidia.com/gpu': self.limit_gpu,
'ephemeral-storage': self.limit_ephemeral_storage,
}
requests_raw = {
'cpu': self.request_cpu,
'memory': self.request_memory,
'ephemeral-storage': self.request_ephemeral_storage,
}
limits = {k: v for k, v in limits_raw.items() if v}
requests = {k: v for k, v in requests_raw.items() if v}
resource_req = k8s.V1ResourceRequirements(limits=limits, requests=requests)
return resource_req
class Port:
"""POD port"""
__slots__ = ('name', 'container_port')
def __init__(self, name=None, container_port=None):
"""Creates port"""
self.name = name
self.container_port = container_port
def to_k8s_client_obj(self):
"""
Converts to k8s object.
:rtype: object
"""
return k8s.V1ContainerPort(name=self.name, container_port=self.container_port)
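
# A brief usage sketch (illustrative values only): both helpers convert to the
# corresponding kubernetes client objects via to_k8s_client_obj().
#
#     resources = Resources(request_memory='512Mi', request_cpu='500m',
#                           limit_memory='1Gi', limit_gpu=1)
#     k8s_resources = resources.to_k8s_client_obj()   # k8s.V1ResourceRequirements
#     http_port = Port(name='http', container_port=8080).to_k8s_client_obj()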
# Copyright (C) 2014-2016 Andrey Antukh
# Copyright (C) 2014-2016 Jesús Espino
# Copyright (C) 2014-2016 David Barragán
# Copyright (C) 2014-2016 Alejandro Alonso
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction as tx
from django.db import IntegrityError
from django.utils.translation import ugettext as _
from django.apps import apps
from taiga.base.utils.slug import slugify_uniquely
from taiga.base import exceptions as exc
from taiga.auth.services import send_register_email
from taiga.auth.services import make_auth_response_data, get_membership_by_token
from taiga.auth.signals import user_registered as user_registered_signal
from . import connector
@tx.atomic
def gitlab_register(username:str, email:str, full_name:str, gitlab_id:int, bio:str, token:str=None):
"""
Register a new user from gitlab.
    This can raise `exc.IntegrityError` exceptions if
    conflicts are found.
:returns: User
"""
auth_data_model = apps.get_model("users", "AuthData")
user_model = apps.get_model("users", "User")
try:
        # Does a Gitlab user association already exist?
auth_data = auth_data_model.objects.get(key="gitlab", value=gitlab_id)
user = auth_data.user
except auth_data_model.DoesNotExist:
try:
            # Is there a user with the same email as the Gitlab user?
user = user_model.objects.get(email=email)
auth_data_model.objects.create(user=user, key="gitlab", value=gitlab_id, extra={})
except user_model.DoesNotExist:
# Create a new user
username_unique = slugify_uniquely(username, user_model, slugfield="username")
user = user_model.objects.create(email=email,
username=username_unique,
full_name=full_name,
bio=bio)
auth_data_model.objects.create(user=user, key="gitlab", value=gitlab_id, extra={})
send_register_email(user)
user_registered_signal.send(sender=user.__class__, user=user)
if token:
membership = get_membership_by_token(token)
try:
membership.user = user
membership.save(update_fields=["user"])
except IntegrityError:
raise exc.IntegrityError(_("This user is already a member of the project."))
return user
def gitlab_login_func(request):
code = request.DATA.get('code', None)
token = request.DATA.get('token', None)
redirectUri = request.DATA.get('redirectUri', None)
email, user_info = connector.me(code, redirectUri)
user = gitlab_register(username=user_info.username,
email=email,
full_name=user_info.full_name,
gitlab_id=user_info.id,
bio=user_info.bio,
token=token)
data = make_auth_response_data(user)
return data
# Copyright (c) 1999-2002 Gary Strangman; All Rights Reserved.
#
# This software is distributable under the terms of the GNU
# General Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise
# using this module constitutes acceptance of the terms of this License.
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: May 10, 2002 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
# CHANGE LOG:
# ===========
# 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
# 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
# 00-12-28 ... removed aanova() to separate module, fixed licensing to
# match Python License, fixed doc string & imports
# 00-04-13 ... pulled all "global" statements, except from aanova()
# added/fixed lots of documentation, removed io.py dependency
# changed to version 0.5
# 99-11-13 ... added asign() function
# 99-11-01 ... changed version to 0.4 ... enough incremental changes now
# 99-10-25 ... added acovariance and acorrelation functions
# 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
# added aglm function (crude, but will be improved)
# 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
# all handle lists of 'dimension's and keepdims
# REMOVED ar0, ar2, ar3, ar4 and replaced them with around
# reinserted fixes for abetai to avoid math overflows
# 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
# handle multi-dimensional arrays (whew!)
# 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
# added anormaltest per same reference
# re-wrote azprob to calc arrays of probs all at once
# 99-08-22 ... edited attest_ind printing section so arrays could be rounded
# 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
# short/byte arrays (mean of #s btw 100-300 = -150??)
# 99-08-09 ... fixed asum so that the None case works for Byte arrays
# 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
# 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
# 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
# 04/11/99 ... added asignaltonoise, athreshold functions, changed all
# max/min in array section to N.maximum/N.minimum,
# fixed square_of_sums to prevent integer overflow
# 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
# 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
# 02/28/99 ... Fixed aobrientransform to return an array rather than a list
# 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
# 01/13/99 ... CHANGED TO VERSION 0.3
# fixed bug in a/lmannwhitneyu p-value calculation
# 12/31/98 ... fixed variable-name bug in ldescribe
# 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
# 12/16/98 ... changed amedianscore to return float (not array) for 1 score
# 12/14/98 ... added atmin and atmax functions
# removed umath from import line (not needed)
# l/ageometricmean modified to reduce chance of overflows (take
# nth root first, then multiply)
# 12/07/98 ... added __version__variable (now 0.2)
# removed all 'stats.' from anova() fcn
# 12/06/98 ... changed those functions (except shellsort) that altered
# arguments in-place ... cumsum, ranksort, ...
# updated (and fixed some) doc-strings
# 12/01/98 ... added anova() function (requires NumPy)
# incorporated Dispatch class
# 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
# added 'asum' function (added functionality to N.add.reduce)
# fixed both moment and amoment (two errors)
# changed name of skewness and askewness to skew and askew
#              fixed (a)histogram (which sometimes counted points < binsize/2)

import copy
import math

import pstat  # companion pstat.py module (see module docstring)

# CENTRAL TENDENCY

def lmedian(inlist, numbins=1000):
    """
    Returns the computed median value of a list of numbers, given the
    number of bins to use for the histogram (more bins brings the computed
    value closer to the median score; default number of bins = 1000).
    Usage: lmedian(inlist, numbins=1000)
    """
    (hist, smallest, binsize, extras) = histogram(inlist, numbins, [min(inlist), max(inlist)])
    cumhist = cumsum(hist)  # make cumulative histogram
    for i in range(len(cumhist)):  # get first bin holding the 50th-percentile score
        if cumhist[i] >= len(inlist)/2.0:
            cfbin = i
            break
    LRL = smallest + binsize*cfbin  # get lower read limit of that bin
    cfbelow = cumhist[cfbin-1]
    freq = float(hist[cfbin])  # frequency IN the 50%ile bin
    median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize  # median formula
    return median
def lmedianscore(inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = sorted(copy.deepcopy(inlist))
    if len(newlist) % 2 == 0:   # if even number of scores, average middle 2
        index = len(newlist) // 2  # integer division correct
        median = float(newlist[index] + newlist[index-1]) / 2
    else:
        index = len(newlist) // 2  # integer division gives mid value when counting from 0
        median = newlist[index]
return median
def lmode(inlist):
"""
Returns a list of the modal (most common) score(s) in the passed
list. If there is more than one such score, all are returned. The
bin-count for the mode(s) is also returned.
Usage: lmode(inlist)
Returns: bin-count for mode(s), a list of modal value(s)
"""
scores = sorted(pstat.unique(inlist))
freq = []
for item in scores:
freq.append(inlist.count(item))
maxfreq = max(freq)
mode = []
stillmore = 1
while stillmore:
try:
indx = freq.index(maxfreq)
mode.append(scores[indx])
del freq[indx]
del scores[indx]
except ValueError:
stillmore = 0
return maxfreq, mode
# MOMENTS
def lmoment(inlist, moment=1):
"""
Calculates the nth moment about the mean for a sample (defaults to
the 1st moment). Used to calculate coefficients of skewness and kurtosis.
Usage: lmoment(inlist,moment=1)
Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
"""
if moment == 1:
return 0.0
else:
mn = mean(inlist)
n = len(inlist)
s = 0
for x in inlist:
s = s + (x-mn)**moment
return s/float(n)
def lvariation(inlist):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6.
Usage: lvariation(inlist)
"""
return 100.0*samplestdev(inlist)/float(mean(inlist))
def lskew(inlist):
"""
Returns the skewness of a distribution, as defined in Numerical
    Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lskew(inlist)
"""
return moment(inlist, 3)/pow(moment(inlist, 2), 1.5)
def lkurtosis(inlist):
"""
Returns the kurtosis of a distribution, as defined in Numerical
    Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lkurtosis(inlist)
"""
return moment(inlist, 4)/pow(moment(inlist, 2), 2.0)
def ldescribe(inlist):
"""
Returns some descriptive statistics of the passed list (assumed to be 1D).
Usage: ldescribe(inlist)
Returns: n, mean, standard deviation, skew, kurtosis
"""
n = len(inlist)
mm = (min(inlist), max(inlist))
m = mean(inlist)
sd = stdev(inlist)
sk = skew(inlist)
kurt = kurtosis(inlist)
return n, mm, m, sd, sk, kurt
# FREQUENCY STATS
def litemfreq(inlist):
"""
Returns a list of pairs. Each pair consists of one of the scores in inlist
    and its frequency count. Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = sorted(pstat.unique(inlist))
freq = []
for item in scores:
freq.append(inlist.count(item))
return pstat.abut(scores, freq)
def lscoreatpercentile(inlist, percent):
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist)
cumhist = cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def lpercentileofscore(inlist, score, histbins=10, defaultlimits=None):
"""
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
h, lrl, binsize, extras = histogram(inlist, histbins, defaultlimits)
cumhist = cumsum(copy.deepcopy(h))
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
return pct
def lhistogram(inlist, numbins=10, defaultreallimits=None, printextras=0):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
if (defaultreallimits is not None):
if type(defaultreallimits) not in [list, tuple] or len(defaultreallimits) == 1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.0001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth = (max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
binsize = (max(inlist)-min(inlist)+estbinwidth)/float(numbins)
lowerreallimit = min(inlist) - binsize/2 # lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except Exception:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =', extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist, numbins=10, defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(inlist, numbins, defaultreallimits)
cumhist = cumsum(copy.deepcopy(h))
return cumhist, l, b, e
def lrelfreq(inlist, numbins=10, defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(inlist, numbins, defaultreallimits)
for i in range(len(h)):
h[i] = h[i]/float(len(inlist))
return h, l, b, e
# VARIABILITY FUNCTIONS
def lobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. From
Maxwell and Delaney, p.112.
Usage: lobrientransform(*args)
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = [0.0]*k
v = [0.0]*k
m = [0.0]*k
nargs = []
for i in range(k):
nargs.append(copy.deepcopy(args[i]))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
        for i in range(int(n[j])):
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check != 1:
raise ValueError('Problem in obrientransform.')
else:
return nargs
def lsamplevar(inlist):
"""
Returns the variance of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample variance only).
Usage: lsamplevar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = []
for item in inlist:
deviations.append(item-mn)
return ss(deviations)/float(n)
def lsamplestdev(inlist):
"""
Returns the standard deviation of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample stdev only).
Usage: lsamplestdev(inlist)
"""
return math.sqrt(samplevar(inlist))
def lvar(inlist):
"""
Returns the variance of the values in the passed list using N-1
for the denominator (i.e., for estimating population variance).
Usage: lvar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = [0]*len(inlist)
for i in range(len(inlist)):
deviations[i] = inlist[i] - mn
return ss(deviations)/float(n-1)
def lstdev(inlist):
"""
Returns the standard deviation of the values in the passed list
using N-1 in the denominator (i.e., to estimate population stdev).
Usage: lstdev(inlist)
"""
return math.sqrt(var(inlist))
def lsterr(inlist):
"""
Returns the standard error of the values in the passed list using N-1
in the denominator (i.e., to estimate population standard error).
Usage: lsterr(inlist)
"""
return stdev(inlist) / float(math.sqrt(len(inlist)))
def lsem(inlist):
"""
Returns the estimated standard error of the mean (sx-bar) of the
values in the passed list. sem = stdev / sqrt(n)
Usage: lsem(inlist)
"""
sd = stdev(inlist)
n = len(inlist)
return sd/math.sqrt(n)
def lz(inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z
def lzs(inlist):
"""
Returns a list of z-scores, one for each score in the passed list.
Usage: lzs(inlist)
"""
zscores = []
for item in inlist:
zscores.append(z(inlist, item))
return zscores
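# Worked example (illustrative): for inlist = [1, 2, 3, 4, 5] and score = 5,
# mean = 3.0 and samplestdev = sqrt(10/5) = sqrt(2) ~= 1.414, so
# lz([1, 2, 3, 4, 5], 5) ~= (5 - 3) / 1.414 ~= 1.414. lzs() simply applies the
# same calculation to every score in the list.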
# TRIMMING FUNCTIONS
def ltrimboth(l, proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
10% of scores). Assumes list is sorted by magnitude. Slices off LESS if
proportion results in a non-integer slice index (i.e., conservatively
slices off proportiontocut).
Usage: ltrimboth (l,proportiontocut)
Returns: trimmed version of list l
"""
lowercut = int(proportiontocut*len(l))
uppercut = len(l) - lowercut
return l[lowercut:uppercut]
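# Worked example (illustrative): with l = list(range(1, 11)) (already sorted)
# and proportiontocut = 0.1, lowercut = int(0.1*10) = 1 and uppercut = 9, so
# ltrimboth(l, 0.1) returns [2, 3, 4, 5, 6, 7, 8, 9]. A proportion of 0.125
# also gives lowercut = int(1.25) = 1 (the "conservative" behavior noted above).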
def ltrim1(l, proportiontocut, tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: ltrim1 (l,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of list l
"""
if tail == 'right':
lowercut = 0
uppercut = len(l) - int(proportiontocut*len(l))
elif tail == 'left':
lowercut = int(proportiontocut*len(l))
uppercut = len(l)
return l[lowercut:uppercut]
# CORRELATION FUNCTIONS
def lpaired(x, y):
"""
Interactively determines the type of data and then runs the
appropriate statistic for paired group data.
Usage: lpaired(x,y)
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = input()
if samples in ['i', 'I', 'r', 'R']:
print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & Delaney, p.112
r = obrientransform(x, y)
f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
if p < 0.05:
vartype = 'unequal, p='+str(round(p, 4))
else:
vartype = 'equal'
print(vartype)
if samples in ['i', 'I']:
if vartype[0] == 'e':
t, p = ttest_ind(x, y, 0)
print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
else:
if len(x) > 20 or len(y) > 20:
z, p = ranksums(x, y)
print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
else:
u, p = mannwhitneyu(x, y)
print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
else: # RELATED SAMPLES
if vartype[0] == 'e':
t, p = ttest_rel(x, y, 0)
print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
else:
t, p = ranksums(x, y)
print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = input()
if corrtype in ['c', 'C']:
m, b, r, p, see = linregress(x, y)
print('\nLinear regression for continuous variables ...')
lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'], [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]]
pstat.printcc(lol)
elif corrtype in ['r', 'R']:
r, p = spearmanr(x, y)
print('\nCorrelation for ranked variables ...')
print("Spearman's r: ", round(r, 4), round(p, 4))
else: # DICHOTOMOUS
r, p = pointbiserialr(x, y)
print('\nAssuming x contains a dichotomous variable ...')
print('Point Biserial r: ', round(r, 4), round(p, 4))
print('\n\n')
return None
def lpearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (2nd), p.195.
Usage: lpearsonr(x,y) where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
TINY = 1.0e-30
if len(x) != len(y):
raise ValueError('Input values not paired in pearsonr. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
r_num = n*(summult(x, y)) - sum(x)*sum(y)
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = (r_num / r_den) # denominator already a float
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df, 0.5, df/float(df+t*t))
return r, prob
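# The quantity computed above is
#   r = (n*sum(xy) - sum(x)*sum(y)) /
#       sqrt((n*sum(x^2) - (sum(x))^2) * (n*sum(y^2) - (sum(y))^2)),
# with a two-tailed p-value from the t distribution on n-2 df. Illustrative
# check: lpearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10]) on perfectly linear data
# should return r == 1.0 with a p-value of (essentially) 0.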
def lspearmanr(x, y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
if len(x) != len(y):
raise ValueError('Input values not paired in spearmanr. Aborting.')
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx, ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df, 0.5, df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipes, p.510. They are close to tables, but not exact. (?)
return rs, probrs
def lpointbiserialr(x, y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: lpointbiserialr(x,y) where x,y are equal-length lists
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
if len(x) != len(y):
raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
data = pstat.abut(x, y)
categories = pstat.unique(x)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required for pointbiserialr().")
else: # there are 2 categories, continue
codemap = pstat.abut(categories, range(2))
pstat.recode(data, codemap, 0) # recoded
x = pstat.linexand(data, 0, categories[0])
y = pstat.linexand(data, 0, categories[1])
xmean = mean(pstat.colex(x, 1))
ymean = mean(pstat.colex(y, 1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/samplestdev(pstat.colex(data, 1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = betai(0.5*df, 0.5, df/(df+t*t)) # t already a float
return rpb, prob
def lkendalltau(x, y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes. Needs good test-routine.@@@
Usage: lkendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
for k in range(j, len(y)):
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither list has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss - 1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def llinregress(x, y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x, y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df, 0.5, df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
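# Worked example (illustrative): for x = [1, 2, 3, 4] and y = [3, 5, 7, 9]
# (i.e., y = 2x + 1), llinregress(x, y) returns slope 2.0, intercept 1.0,
# r = 1.0, a p-value of (essentially) 0, and a standard error of estimate of
# 0.0, since sqrt(1 - r*r) is 0 for perfectly linear data.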
# INFERENTIAL STATISTICS
def lttest_1samp(a, popmean, printit=0, name='Sample', writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
Usage: lttest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
Returns: t-value, two-tailed prob
"""
x = mean(a)
v = var(a)
n = len(a)
df = n-1
svar = ((n-1)*v)/float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = betai(0.5*df, 0.5, float(df)/(df+t*t))
if printit != 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit, writemode,
'Population', '--', popmean, 0, 0, 0,
name, n, x, v, min(a), max(a),
statname, t, prob)
return t, prob
def lttest_ind(a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
scores a, and b. From Numerical Recipes, p.483. If printit=1, results
are printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Returns t-value,
and prob.
Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob
"""
x1 = mean(a)
x2 = mean(b)
v1 = stdev(a)**2
v2 = stdev(b)**2
n1 = len(a)
n2 = len(b)
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2)/float(df)
t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
prob = betai(0.5*df, 0.5, df/(df+t*t))
if printit != 0:
statname = 'Independent samples T-test.'
outputpairedstats(printit, writemode,
name1, n1, x1, v1, min(a), max(a),
name2, n2, x2, v2, min(b), max(b),
statname, t, prob)
return t, prob
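# Usage sketch (illustrative): the statistic above uses the pooled variance
# svar on df = n1 + n2 - 2 degrees of freedom, e.g.:
#   t, p = lttest_ind([4, 5, 6, 7], [6, 7, 8, 9])
# A negative t simply means the first sample mean is below the second; the
# returned p-value is two-tailed.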
def lttest_rel(a, b, printit=0, name1='Sample1', name2='Sample2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores,
a and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output to
'filename' using the given writemode (default=append). Returns t-value,
and prob.
Usage: lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
Returns: t-value, two-tailed prob
"""
if len(a) != len(b):
raise ValueError('Unequal length lists in ttest_rel.')
x1 = mean(a)
x2 = mean(b)
v1 = var(a)
v2 = var(b)
n = len(a)
cov = 0
for i in range(len(a)):
cov = cov + (a[i]-x1) * (b[i]-x2)
df = n-1
cov = cov / float(df)
sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))
t = (x1-x2)/sd
prob = betai(0.5*df, 0.5, df/(df+t*t))
if printit != 0:
statname = 'Related samples T-test.'
outputpairedstats(printit, writemode,
name1, n, x1, v1, min(a), max(a),
name2, n, x2, v2, min(b), max(b),
statname, t, prob)
return t, prob
def lchisquare(f_obs, f_exp=None):
"""
Calculates a one-way chi square for list of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: lchisquare(f_obs, f_exp=None) f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs) # number of groups
if f_exp is None:
f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
chisq = 0
for i in range(len(f_obs)):
chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
return chisq, chisqprob(chisq, k-1)
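# Worked example (illustrative): lchisquare([10, 10, 20]) assumes expected
# frequencies of 40/3 per cell, giving
#   chisq = 2*(10 - 40/3)**2/(40/3) + (20 - 40/3)**2/(40/3) = 5.0
# on k-1 = 2 df, so the returned p-value is chisqprob(5.0, 2) = exp(-2.5),
# roughly 0.082.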
def lks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples. From
Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1 = data1[j1]
d2 = data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except Exception:
prob = 1.0
return d, prob
def lmannwhitneyu(x, y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. NOTE: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U found in the tables. Equivalent to Kruskal-Wallis H with
just 2 groups.
Usage: lmannwhitneyu(x,y) where x,y are lists of scores for the 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(x+y)
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1, u2)
smallu = min(u1, u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in lmannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
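# Usage sketch (illustrative): u, p = lmannwhitneyu(groupA, groupB), where
# groupA/groupB are example names for two small (n < 20) independent samples.
# The returned u is the smaller of the two U values, and p comes from the
# normal approximation on z(U), so it is a one-tailed probability.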
def ltiecorrect(rankvals):
"""
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: ltiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted, posn = shellsort(rankvals)
n = len(sorted)
T = 0.0
i = 0
while (i < n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i < n-1) and (sorted[i] == sorted[i+1]):
nties = nties + 1
i = i + 1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
def lranksums(x, y):
"""
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 - zprob(abs(z)))
return z, prob
def lwilcoxont(x, y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: lwilcoxont(x,y)
Returns: a t-statistic, two-tail probability estimate
"""
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxont. Aborting.')
d = []
for i in range(len(x)):
diff = x[i] - y[i]
if diff != 0:
d.append(diff)
count = len(d)
absd = [abs(_) for _ in d]
absranked = rankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 - zprob(abs(z)))
return wt, prob
def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0]*len(args)
all = []
n = [len(_) for _ in args]
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish')
h = h / float(T)
return h, chisqprob(h, df)
def lfriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for repeated
measures and returns the result, along with the associated probability
value. It assumes 3 or more repeated measures. With only 3 levels, a
minimum of 10 subjects is required; four levels requires 5 subjects per
level(??).
Usage: lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
data = pstat.abut(*tuple(args))
for i in range(len(data)):
data[i] = rankdata(data[i])
ssbn = 0
for i in range(k):
ssbn = ssbn + sum(pstat.colex(data, i))**2  # sum of ranks for condition i
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq, k-1)
# PROBABILITY CALCULATIONS
def lchisqprob(chisq, df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <= 0 or df < 1:
return 1.0
a = 0.5 * chisq
if df % 2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s
def lerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: lerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans
def lzprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat.
Usage: lzprob(z)
"""
Z_MAX = 6.0 # maximum meaningful z-value
if z == 0.0:
x = 0.0
else:
y = 0.5 * math.fabs(z)
if y >= (Z_MAX*0.5):
x = 1.0
elif (y < 1.0):
w = y*y
x = ((((((((0.000124818987 * w
- 0.001075204047) * w + 0.005198775019) * w
- 0.019198292004) * w + 0.059054035642) * w
- 0.151968751364) * w + 0.319152932694) * w
- 0.531923007300) * w + 0.797884560593) * y * 2.0
else:
y = y - 2.0
x = (((((((((((((-0.000045255659 * y
+ 0.000152529290) * y - 0.000019538132) * y
- 0.000676904986) * y + 0.001390604284) * y
- 0.000794620820) * y - 0.002034254874) * y
+ 0.006549791214) * y - 0.010557625006) * y
+ 0.011630447319) * y - 0.009279453341) * y
+ 0.005353579108) * y - 0.002141268741) * y
+ 0.000535310849) * y + 0.999936657524
if z > 0.0:
prob = ((x+1.0)*0.5)
else:
prob = ((1.0-x)*0.5)
return prob
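# Illustrative values: lzprob(0.0) returns 0.5 (half the normal curve lies to
# the left of z = 0), and lzprob(1.96) is approximately 0.975, so the
# two-tailed probability 2.0*(1.0 - lzprob(1.96)) is approximately 0.05.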
def lksprob(alam):
"""
Computes a Kolmogorov-Smirnov test significance level. Adapted from
Numerical Recipes.
Usage: lksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1, 201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0 # Get here only if fails to converge; was 0.0!!
def lfprob(dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p
def lbetacf(a, b, x):
"""
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipes in C.)
Usage: lbetacf(a,b,x)
"""
ITMAX = 200
EPS = 3.0e-7
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
if (abs(az-aold) < (EPS*abs(az))):
return az
print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)
Usage: lgammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser)
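# Illustrative checks for the approximation above: since Gamma(1) = 1 and
# Gamma(4) = 3! = 6, lgammln(1.0) should be approximately 0.0 and
# lgammln(4.0) approximately math.log(6) ~= 1.7918, up to small
# approximation error.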
def lbetai(a, b, x):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
using the betacf function. (Adapted from: Numerical Recipes in C.)
Usage: lbetai(a,b,x)
"""
if (x < 0.0 or x > 1.0):
raise ValueError('Bad x in lbetai')
if (x == 0.0 or x == 1.0):
bt = 0.0
else:
bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b
* math.log(1.0-x))
if (x < (a+1.0)/(a+b+2.0)):
return bt*betacf(a, b, x)/float(a)
else:
return 1.0-bt*betacf(b, a, 1.0-x)/float(b)
# ANOVA CALCULATIONS
def lF_oneway(*lists):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: F_oneway(*lists) where *lists is any number of lists, one per
treatment group
Returns: F value, one-tailed p-value
"""
a = len(lists) # ANOVA on 'a' groups, each in its own list
alldata = []
for i in range(len(lists)):
alldata = alldata + lists[i]
alldata = N.array(alldata)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for list in lists:
ssbn = ssbn + asquare_of_sums(N.array(list))/float(len(list))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = a-1
dfwn = bign - a
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn, dfwn, f)
return f, prob
def lF_value(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR-dfF = degrees of freedom of the numerator
dfF = degrees of freedom associated with the denominator/Full model
Usage: lF_value(ER,EF,dfnum,dfden)
"""
return ((ER-EF)/float(dfnum) / (EF/float(dfden)))
# SUPPORT FUNCTIONS
def writecc(listoflists, file, writetype='w', extra=2):
"""
Writes a list of lists to a file in columns, with each column sized to the
largest item it contains plus 'extra' characters (default 2).
File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
if type(listoflists[0]) not in [list, tuple]:
listoflists = [listoflists]
outfile = open(file, writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i] == '\n' or listoflists[i] == 'dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = pstat.colex(list2print, col)
items = [pstat.makestr(_) for _ in items]
maxsize[col] = max(map(len, items)) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(pstat.lineincustcols(dashes, maxsize))
else:
outfile.write(pstat.lineincustcols(row, maxsize))
outfile.write('\n')
outfile.close()
return None
def lincr(l, cap): # to increment a list up to a max-list of 'cap'
"""
Simulate a counting system from an n-dimensional list.
Usage: lincr(l,cap) l=list to increment, cap=max values for each list pos'n
Returns: next set of values for list l, OR -1 (if overflow)
"""
l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
for i in range(len(l)):
if l[i] > cap[i] and i < len(l)-1: # if carryover AND not done
l[i] = 0
l[i+1] = l[i+1] + 1
elif l[i] > cap[i] and i == len(l)-1: # overflow past last column, must be finished
l = -1
return l
def lsum(inlist):
"""
Returns the sum of the items in the passed list.
Usage: lsum(inlist)
"""
s = 0
for item in inlist:
s = s + item
return s
def lcumsum(inlist):
"""
Returns a list consisting of the cumulative sum of the items in the
passed list.
Usage: lcumsum(inlist)
"""
newlist = copy.deepcopy(inlist)
for i in range(1, len(newlist)):
newlist[i] = newlist[i] + newlist[i-1]
return newlist
def lss(inlist):
"""
Squares each value in the passed list, adds up these squares and
returns the result.
Usage: lss(inlist)
"""
ss = 0
for item in inlist:
ss = ss + item*item
return ss
def lsummult(list1, list2):
"""
Multiplies elements in list1 and list2, element by element, and
returns the sum of all resulting multiplications. Must provide equal
length lists.
Usage: lsummult(list1,list2)
"""
if len(list1) != len(list2):
raise ValueError("Lists not equal length in summult.")
s = 0
for item1, item2 in pstat.abut(list1, list2):
s = s + item1*item2
return s
def lsumdiffsquared(x, y):
"""
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.
Usage: lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
sds = 0
for i in range(len(x)):
sds = sds + (x[i]-y[i])**2
return sds
def lsquare_of_sums(inlist):
"""
Adds the values in the passed list, squares the sum, and returns
the result.
Usage: lsquare_of_sums(inlist)
Returns: sum(inlist[i])**2
"""
s = sum(inlist)
return float(s)*s
def lshellsort(inlist):
"""
Shellsort algorithm. Sorts a 1D-list.
Usage: lshellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
n = len(inlist)
svec = copy.deepcopy(inlist)
ivec = list(range(n))
gap = n // 2 # integer division needed
while gap > 0:
for i in range(gap, n):
for j in range(i-gap, -1, -gap):
while j >= 0 and svec[j] > svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
gap = gap // 2 # integer division needed
# svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def lrankdata(inlist):
"""
Ranks the data in inlist, dealing with ties appropriately. Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: lrankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
n = len(inlist)
svec, ivec = shellsort(inlist)
sumranks = 0
dupcount = 0
newlist = [0]*n
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i == n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1, i+1):
newlist[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newlist
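# Worked example (illustrative): lrankdata([1, 2, 2, 3]) returns
# [1.0, 2.5, 2.5, 4.0] -- tied scores receive the average of the ranks they
# would otherwise occupy.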
def outputpairedstats(fname, writemode, name1, n1, m1, se1, min1, max1, name2, n2, m2, se2, min2, max2, statname, stat, prob):
"""
Prints or write to a file stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
"""
suffix = '' # for *s after the p-value
try:
prob.shape
prob = prob[0]
except Exception:
pass
if prob < 0.001:
suffix = ' ***'
elif prob < 0.01:
suffix = ' **'
elif prob < 0.05:
suffix = ' *'
title = [['Name', 'N', 'Mean', 'SD', 'Min', 'Max']]
lofl = title+[[name1, n1, round(m1, 3), round(math.sqrt(se1), 3), min1, max1],
[name2, n2, round(m2, 3), round(math.sqrt(se2), 3), min2, max2]]
if not isinstance(fname, str) or len(fname) == 0:
print()
print(statname)
print()
pstat.printcc(lofl)
print()
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except Exception:
pass
print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix)
print()
else:
file = open(fname, writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl, fname, 'a')
file = open(fname, 'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except Exception:
pass
file.write(pstat.list2string(['\nTest statistic = ', round(stat, 4), ' p = ', round(prob, 4), suffix, '\n\n']))
file.close()
return None
def lfindwithin(data):
"""
Returns an integer representing a binary vector, where 1=within-
subject factor, 0=between. Input equals the entire data 2D list (i.e.,
column 0=random factor, column -1=measured values; those two columns are skipped).
Note: input data is in |Stat format ... a list of lists ("2D list") with
one row per measured value, first column=subject identifier, last column=
score, one in-between column per factor (these columns contain level
designations on each factor). See also stats.anova.__doc__.
Usage: lfindwithin(data) data in |Stat format
"""
numfact = len(data[0])-1
withinvec = 0
for col in range(1, numfact):
examplelevel = pstat.unique(pstat.colex(data, col))[0]
rows = pstat.linexand(data, col, examplelevel) # get 1 level of this factor
factsubjs = pstat.unique(pstat.colex(rows, 0))
allsubjs = pstat.unique(pstat.colex(data, 0))
if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?
withinvec = withinvec + (1 << col)
return withinvec
# DISPATCH LISTS AND TUPLES TO ABOVE FCNS
# CENTRAL TENDENCY:
geometricmean = Dispatch((lgeometricmean, (list, tuple)), )
harmonicmean = Dispatch((lharmonicmean, (list, tuple)), )
mean = Dispatch((lmean, (list, tuple)), )
median = Dispatch((lmedian, (list, tuple)), )
medianscore = Dispatch((lmedianscore, (list, tuple)), )
mode = Dispatch((lmode, (list, tuple)), )
# MOMENTS:
moment = Dispatch((lmoment, (list, tuple)), )
variation = Dispatch((lvariation, (list, tuple)), )
skew = Dispatch((lskew, (list, tuple)), )
kurtosis = Dispatch((lkurtosis, (list, tuple)), )
describe = Dispatch((ldescribe, (list, tuple)), )
# FREQUENCY STATISTICS:
itemfreq = Dispatch((litemfreq, (list, tuple)), )
scoreatpercentile = Dispatch((lscoreatpercentile, (list, tuple)), )
percentileofscore = Dispatch((lpercentileofscore, (list, tuple)), )
histogram = Dispatch((lhistogram, (list, tuple)), )
cumfreq = Dispatch((lcumfreq, (list, tuple)), )
relfreq = Dispatch((lrelfreq, (list, tuple)), )
# VARIABILITY:
obrientransform = Dispatch((lobrientransform, (list, tuple)), )
samplevar = Dispatch((lsamplevar, (list, tuple)), )
samplestdev = Dispatch((lsamplestdev, (list, tuple)), )
var = Dispatch((lvar, (list, tuple)), )
stdev = Dispatch((lstdev, (list, tuple)), )
sterr = Dispatch((lsterr, (list, tuple)), )
sem = Dispatch((lsem, (list, tuple)), )
z = Dispatch((lz, (list, tuple)), )
zs = Dispatch((lzs, (list, tuple)), )
# TRIMMING FCNS:
trimboth = Dispatch((ltrimboth, (list, tuple)), )
trim1 = Dispatch((ltrim1, (list, tuple)), )
# CORRELATION FCNS:
paired = Dispatch((lpaired, (list, tuple)), )
pearsonr = Dispatch((lpearsonr, (list, tuple)), )
spearmanr = Dispatch((lspearmanr, (list, tuple)), )
pointbiserialr = Dispatch((lpointbiserialr, (list, tuple)), )
kendalltau = Dispatch((lkendalltau, (list, tuple)), )
linregress = Dispatch((llinregress, (list, tuple)), )
# INFERENTIAL STATS:
ttest_1samp = Dispatch((lttest_1samp, (list, tuple)), )
ttest_ind = Dispatch((lttest_ind, (list, tuple)), )
ttest_rel = Dispatch((lttest_rel, (list, tuple)), )
chisquare = Dispatch((lchisquare, (list, tuple)), )
ks_2samp = Dispatch((lks_2samp, (list, tuple)), )
mannwhitneyu = Dispatch((lmannwhitneyu, (list, tuple)), )
ranksums = Dispatch((lranksums, (list, tuple)), )
tiecorrect = Dispatch((ltiecorrect, (list, tuple)), )
wilcoxont = Dispatch((lwilcoxont, (list, tuple)), )
kruskalwallish = Dispatch((lkruskalwallish, (list, tuple)), )
friedmanchisquare = Dispatch((lfriedmanchisquare, (list, tuple)), )
# PROBABILITY CALCS:
chisqprob = Dispatch((lchisqprob, (int, float)), )
zprob = Dispatch((lzprob, (int, float)), )
ksprob = Dispatch((lksprob, (int, float)), )
fprob = Dispatch((lfprob, (int, float)), )
betacf = Dispatch((lbetacf, (int, float)), )
betai = Dispatch((lbetai, (int, float)), )
erfcc = Dispatch((lerfcc, (int, float)), )
gammln = Dispatch((lgammln, (int, float)), )
# ANOVA FUNCTIONS:
F_oneway = Dispatch((lF_oneway, (list, tuple)), )
F_value = Dispatch((lF_value, (list, tuple)), )
# SUPPORT FUNCTIONS:
incr = Dispatch((lincr, (list, tuple)), )
sum = Dispatch((lsum, (list, tuple)), )
cumsum = Dispatch((lcumsum, (list, tuple)), )
ss = Dispatch((lss, (list, tuple)), )
summult = Dispatch((lsummult, (list, tuple)), )
square_of_sums = Dispatch((lsquare_of_sums, (list, tuple)), )
sumdiffsquared = Dispatch((lsumdiffsquared, (list, tuple)), )
shellsort = Dispatch((lshellsort, (list, tuple)), )
rankdata = Dispatch((lrankdata, (list, tuple)), )
findwithin = Dispatch((lfindwithin, (list, tuple)), )
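# Illustrative note: the Dispatch wrappers above (Dispatch is defined earlier
# in this module) route each call to the matching implementation based on the
# type of the first argument, so for plain Python sequences:
#   mean([1, 2, 3])        # dispatches to lmean, returning 2.0
#   var((2.0, 4.0, 6.0))   # dispatches to lvar
# The "a"-prefixed functions in the section below serve the same roles for
# Numeric arrays.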
# ============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import Numeric
N = Numeric
import LinearAlgebra
LA = LinearAlgebra
# ACENTRAL TENDENCY
def ageometricmean(inarray, dimension=None, keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
inarray = N.array(inarray, N.Float)
if dimension is None:
inarray = N.ravel(inarray)
size = len(inarray)
mult = N.power(inarray, 1.0/size)
mult = N.multiply.reduce(mult)
elif type(dimension) in [int, float]:
size = inarray.shape[dimension]
mult = N.power(inarray, 1.0/size)
mult = N.multiply.reduce(mult, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
mult = N.reshape(mult, shp)  # keep a length-1 axis for the collapsed dimension
else: # must be a SEQUENCE of dims to average over
dims = sorted(dimension)
dims.reverse()
size = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.Float)
mult = N.power(inarray, 1.0/size)
for dim in dims:
mult = N.multiply.reduce(mult, dim)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
mult = N.reshape(mult, shp)
return mult
def aharmonicmean(inarray, dimension=None, keepdims=0):
"""
Calculates the harmonic mean of the values in the passed array.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: aharmonicmean(inarray,dimension=None,keepdims=0)
Returns: harmonic mean computed over dim(s) in dimension
"""
inarray = inarray.astype(N.Float)
if dimension is None:
inarray = N.ravel(inarray)
size = len(inarray)
s = N.add.reduce(1.0 / inarray)
elif type(dimension) in [int, float]:
size = float(inarray.shape[dimension])
s = N.add.reduce(1.0/inarray, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
s = N.reshape(s, shp)
else: # must be a SEQUENCE of dims to average over
dims = sorted(dimension)
nondims = []
for i in range(len(inarray.shape)):
if i not in dims:
nondims.append(i)
tinarray = N.transpose(inarray, nondims+dims) # put keep-dims first
idx = [0] * len(nondims)
if idx == []:
size = len(N.ravel(inarray))
s = asum(1.0 / inarray)
if keepdims == 1:
s = N.reshape([s], N.ones(len(inarray.shape)))
else:
idx[0] = -1
loopcap = N.array(tinarray.shape[0:len(nondims)]) - 1
s = N.zeros(loopcap+1, N.Float)
while incr(idx, loopcap) != -1:
s[idx] = asum(1.0/tinarray[idx])
size = N.multiply.reduce(N.take(inarray.shape, dims))
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s, shp)
return size / s
def amean(inarray, dimension=None, keepdims=0):
"""
Calculates the arithmetic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithmetic mean calculated over dim(s) in dimension
"""
if inarray.typecode() in ['l', 's', 'b']:
inarray = inarray.astype(N.Float)
if dimension is None:
inarray = N.ravel(inarray)
sum = N.add.reduce(inarray)
denom = float(len(inarray))
elif type(dimension) in [int, float]:
sum = asum(inarray, dimension)
denom = float(inarray.shape[dimension])
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum, shp)
else: # must be a TUPLE of dims to average over
dims = sorted(dimension)
dims.reverse()
sum = inarray * 1.0
for dim in dims:
sum = N.add.reduce(sum, dim)
denom = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.Float)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
sum = N.reshape(sum, shp)
return sum/denom
def amedian(inarray, numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
precise median value of the array; default number of bins = 1000). From
G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).
Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
(hist, smallest, binsize, extras) = ahistogram(inarray, numbins)
cumhist = N.cumsum(hist) # make cumulative histogram
otherbins = N.greater_equal(cumhist, len(inarray)/2.0)
otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
LRL = smallest + binsize*cfbin # get lower read limit of that bin
cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
freq = hist[cfbin] # frequency IN the 50%ile bin
median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
return median
def amedianscore(inarray, dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
be None, to pre-flatten the array, or else dimension must equal 0).
Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
inarray = N.sort(inarray, dimension)
if inarray.shape[dimension] % 2 == 0: # if even number of elements
indx = inarray.shape[dimension] // 2 # integer division
median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
else:
indx = inarray.shape[dimension] // 2 # integer division
median = N.take(inarray, [indx], dimension)
if median.shape == (1,):
median = median[0]
return median
def amode(a, dimension=None):
"""
Returns an array of the modal (most common) score in the passed array.
If there is more than one such score, ONLY THE FIRST is returned.
The bin-count for the modal values is also returned. Operates on whole
array (dimension=None), or on a given dimension.
Usage: amode(a, dimension=None)
Returns: array of bin-counts for mode(s), array of corresponding modal values
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
scores = pstat.aunique(N.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[dimension] = 1
oldmostfreq = N.zeros(testshape)
oldcounts = N.zeros(testshape)
for score in scores:
template = N.equal(a, score)
counts = asum(template, dimension, 1)
mostfrequent = N.where(N.greater(counts, oldcounts), score, oldmostfreq)
oldcounts = N.where(N.greater(counts, oldcounts), counts, oldcounts)
oldmostfreq = mostfrequent
return oldcounts, mostfrequent
def atmean(a, limits=None, inclusive=(1, 1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.typecode() in ['l', 's', 'b']:
a = a.astype(N.Float)
if limits is None:
return mean(a)
assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atmean"
if inclusive[0]:
lowerfcn = N.greater_equal
else:
lowerfcn = N.greater
if inclusive[1]:
upperfcn = N.less_equal
else:
upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atmean).")
elif limits[0] is None and limits[1] is not None:
mask = upperfcn(a, limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a, limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a, limits=None, inclusive=(1, 1)):
"""
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
can be set to None. The inclusive list/tuple determines whether the lower
and upper limiting bounds (respectively) are open/exclusive (0) or
closed/inclusive (1).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
a = a.astype(N.Float)
if limits is None or limits == [None, None]:
term1 = N.add.reduce(N.ravel(a*a))
n = float(len(N.ravel(a))) - 1
term2 = N.add.reduce(N.ravel(a))**2 / n
print(term1, term2, n)
return (term1 - term2) / n
assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atvar"
if inclusive[0]:
lowerfcn = N.greater_equal
else:
lowerfcn = N.greater
if inclusive[1]:
upperfcn = N.less_equal
else:
upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atvar).")
elif limits[0] is None and limits[1] is not None:
mask = upperfcn(a, limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a, limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask))) - 1
term2 = N.add.reduce(N.ravel(a*mask))**2 / n
print(term1, term2, n)
return (term1 - term2) / n
def atmin(a, lowerlimit=None, dimension=None, inclusive=1):
"""
Returns the minimum value of a, along dimension, including only values greater
than (or equal to, if inclusive=1) lowerlimit. If the limit is set to None,
all values in the array are used.
Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1)
"""
if inclusive:
lowerfcn = N.greater
else:
lowerfcn = N.greater_equal
if dimension is None:
a = N.ravel(a)
dimension = 0
if lowerlimit is None:
lowerlimit = N.minimum.reduce(N.ravel(a))-11
biggest = N.maximum.reduce(N.ravel(a))
ta = N.where(lowerfcn(a, lowerlimit), a, biggest)
return N.minimum.reduce(ta, dimension)
def atmax(a, upperlimit, dimension=None, inclusive=1):
"""
Returns the maximum value of a, along dimension, including only values less
than (or equal to, if inclusive=1) upperlimit. If the limit is set to None,
a limit larger than the max value in the array is used.
Usage: atmax(a,upperlimit,dimension=None,inclusive=1)
"""
if inclusive:
upperfcn = N.less
else:
upperfcn = N.less_equal
if dimension is None:
a = N.ravel(a)
dimension = 0
if upperlimit is None:
upperlimit = N.maximum.reduce(N.ravel(a))+1
smallest = N.minimum.reduce(N.ravel(a))
ta = N.where(upperfcn(a, upperlimit), a, smallest)
return N.maximum.reduce(ta, dimension)
def atstdev(a, limits=None, inclusive=(1, 1)):
"""
Returns the standard deviation of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atstdev(a,limits=None,inclusive=(1,1))
"""
return N.sqrt(tvar(a, limits, inclusive))
def atsem(a, limits=None, inclusive=(1, 1)):
"""
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
or the value of limits itself, can be set to None. The inclusive list/tuple
determines whether the lower and upper limiting bounds (respectively) are
open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
sd = tstdev(a, limits, inclusive)
if limits is None or limits == [None, None]:
n = float(len(N.ravel(a)))
return sd/math.sqrt(n)  # no limits given: use every value in the array
assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atsem"
if inclusive[0]:
lowerfcn = N.greater_equal
else:
lowerfcn = N.greater
if inclusive[1]:
upperfcn = N.less_equal
else:
upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atsem).")
elif limits[0] is None and limits[1] is not None:
mask = upperfcn(a, limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a, limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1])
N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask)))
return sd/math.sqrt(n)
# AMOMENTS
def amoment(a, moment=1, dimension=None):
"""
Calculates the nth moment about the mean for a sample (defaults to the
1st moment). Generally used to calculate coefficients of skewness and
kurtosis. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions).
Usage: amoment(a,moment=1,dimension=None)
Returns: appropriate moment along given dimension
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
if moment == 1:
return 0.0
else:
mn = amean(a, dimension, 1) # 1=keepdims
s = N.power((a-mn), moment)
return amean(s, dimension)
def avariation(a, dimension=None):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: avariation(a,dimension=None)
"""
return 100.0*asamplestdev(a, dimension)/amean(a, dimension)
def askew(a, dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in the right tail). Use askewtest() to see if it's close enough.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
denom = N.power(amoment(a, 2, dimension), 1.5)
zero = N.equal(denom, 0)
if isinstance(denom, N.ArrayType) and asum(zero) != 0:
print("Number of zeros in askew: ", asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a, 3, dimension)/denom)
def akurtosis(a, dimension=None):
"""
Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
heavier in the tails, and usually more peaked). Use akurtosistest()
to see if it's close enough. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: akurtosis(a,dimension=None)
Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
"""
denom = N.power(amoment(a, 2, dimension), 2)
zero = N.equal(denom, 0)
if isinstance(denom, N.ArrayType) and asum(zero) != 0:
print("Number of zeros in akurtosis: ", asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a, 4, dimension)/denom)
def adescribe(inarray, dimension=None):
"""
Returns several descriptive statistics of the passed array. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
Usage: adescribe(inarray,dimension=None)
Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
n = inarray.shape[dimension]
mm = (N.minimum.reduce(inarray), N.maximum.reduce(inarray))
m = amean(inarray, dimension)
sd = astdev(inarray, dimension)
skew = askew(inarray, dimension)
kurt = akurtosis(inarray, dimension)
return n, mm, m, sd, skew, kurt
# NORMALITY TESTS
def askewtest(a, dimension=None):
"""
Tests whether the skew is significantly different from a normal
distribution. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions).
Usage: askewtest(a,dimension=None)
Returns: z-score and 2-tail z-probability
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
b2 = askew(a, dimension)
n = float(a.shape[dimension])
y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + N.sqrt(2*(beta2-1))
delta = 1/N.sqrt(N.log(N.sqrt(W2)))
alpha = N.sqrt(2/(W2-1))
y = N.where(N.equal(y, 0), 1, y)
Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
return Z, (1.0-zprob(Z))*2
def akurtosistest(a, dimension=None):
"""
Tests whether a dataset has normal kurtosis (i.e.,
kurtosis=3(n-1)/(n+1)) Valid only for n>20. Dimension can equal None
(ravel array first), an integer (the dimension over which to operate),
or a sequence (operate over multiple dimensions).
Usage: akurtosistest(a,dimension=None)
Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
n = float(a.shape[dimension])
if n < 20:
print("akurtosistest only valid for n>=20 ... continuing anyway, n=", n)
b2 = akurtosis(a, dimension)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
x = (b2-E)/N.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))
/ (n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*N.sqrt(2/(A-4.0))
denom = N.where(N.less(denom, 0), 99, denom)
term2 = N.where(N.equal(denom, 0), term1, N.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / N.sqrt(2/(9.0*A))
Z = N.where(N.equal(denom, 99), 0, Z)
return Z, (1.0-zprob(Z))*2
def anormaltest(a, dimension=None):
"""
Tests whether skew and/OR kurtosis of dataset differs from normal
curve. Can operate over multiple dimensions. Dimension can equal
None (ravel array first), an integer (the dimension over which to
operate), or a sequence (operate over multiple dimensions).
Usage: anormaltest(a,dimension=None)
Returns: z-score and 2-tail probability
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
s, p = askewtest(a, dimension)
k, p = akurtosistest(a, dimension)
k2 = N.power(s, 2) + N.power(k, 2)
return k2, achisqprob(k2, 2)
# AFREQUENCY FUNCTIONS
def aitemfreq(a):
"""
Returns a 2D array of item frequencies. Column 1 contains item values,
column 2 contains their respective counts. Assumes a 1D array is passed.
Usage: aitemfreq(a)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = pstat.aunique(a)
scores = N.sort(scores)
freq = N.zeros(len(scores))
for i in range(len(scores)):
freq[i] = N.add.reduce(N.equal(a, scores[i]))
return N.array(pstat.aabut(scores, freq))
def ascoreatpercentile(inarray, percent):
"""
Usage: ascoreatpercentile(inarray,percent) 0<percent<100
Returns: score at given percentile, relative to inarray distribution
"""
percent = percent / 100.0
targetcf = percent*len(inarray)
h, lrl, binsize, extras = histogram(inarray)
cumhist = cumsum(h*1)
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def apercentileofscore(inarray, score, histbins=10, defaultlimits=None):
"""
Note: result of this function depends on the values used to histogram
the data(!).
Usage: apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
Returns: percentile-position of score (0-100) relative to inarray
"""
h, lrl, binsize, extras = histogram(inarray, histbins, defaultlimits)
cumhist = cumsum(h*1)
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
return pct
def ahistogram(inarray, numbins=10, defaultlimits=None, printextras=1):
"""
Returns (i) an array of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. Defaultlimits
can be None (the routine picks bins spanning all the numbers in the
inarray) or a 2-sequence (lowerlimit, upperlimit). Returns all of the
following: array of bin values, lowerreallimit, binsize, extrapoints.
Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
"""
inarray = N.ravel(inarray) # flatten any >1D arrays
if (defaultlimits is not None):
lowerreallimit = defaultlimits[0]
upperreallimit = defaultlimits[1]
binsize = (upperreallimit-lowerreallimit) / float(numbins)
else:
Min = N.minimum.reduce(inarray)
Max = N.maximum.reduce(inarray)
estbinwidth = float(Max - Min)/float(numbins) + 1
binsize = (Max-Min+estbinwidth)/float(numbins)
lowerreallimit = Min - binsize/2.0 # lower real limit,1st bin
bins = N.zeros(numbins)
extrapoints = 0
for num in inarray:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit) / float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except Exception: # point outside lower/upper limits
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =', extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
def acumfreq(a, numbins=10, defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: acumfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(a, numbins, defaultreallimits)
cumhist = cumsum(h*1)
return cumhist, l, b, e
def arelfreq(a, numbins=10, defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: arelfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(a, numbins, defaultreallimits)
h = N.array(h/float(a.shape[0]))
return h, l, b, e
# AVARIABILITY FUNCTIONS
def aobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
    array in *args is one level of a factor. If an F_oneway() run on the
    transformed data comes out significant, the variances are unequal. From
    Maxwell and Delaney, p.112.
Usage: aobrientransform(*args) *args = 1D arrays, one per level of factor
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = N.zeros(k, N.Float)
v = N.zeros(k, N.Float)
m = N.zeros(k, N.Float)
nargs = []
for i in range(k):
nargs.append(args[i].astype(N.Float))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
        for i in range(int(n[j])):
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check != 1:
raise ValueError('Lack of convergence in obrientransform.')
else:
return N.array(nargs)
def asamplevar(inarray, dimension=None, keepdims=0):
"""
    Returns the sample variance of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplevar(inarray,dimension=None,keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
if dimension == 1:
mn = amean(inarray, dimension)[:, N.NewAxis]
else:
mn = amean(inarray, dimension, keepdims=1)
deviations = inarray - mn
if isinstance(dimension, list):
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
svar = ass(deviations, dimension, keepdims) / float(n)
return svar
def asamplestdev(inarray, dimension=None, keepdims=0):
"""
Returns the sample standard deviation of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplestdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(asamplevar(inarray, dimension, keepdims))
def asignaltonoise(instack, dimension=0):
"""
Calculates signal-to-noise. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: asignaltonoise(instack,dimension=0):
Returns: array containing the value of (mean/stdev) along dimension,
or 0 when stdev=0
"""
m = mean(instack, dimension)
sd = stdev(instack, dimension)
return N.where(N.equal(sd, 0), 0, m/sd)
def avar(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population variance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: avar(inarray,dimension=None,keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
mn = amean(inarray, dimension, 1)
deviations = inarray - mn
if isinstance(dimension, list):
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
var = ass(deviations, dimension, keepdims)/float(n-1)
return var
def astdev(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard deviation of the values in
the passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: astdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(avar(inarray, dimension, keepdims))
def asterr(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard error of the values in the
passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: asterr(inarray,dimension=None,keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
return astdev(inarray, dimension, keepdims) / float(N.sqrt(inarray.shape[dimension]))
def asem(inarray, dimension=None, keepdims=0):
"""
Returns the standard error of the mean (i.e., using N) of the values
in the passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: asem(inarray,dimension=None, keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
if isinstance(dimension, list):
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
s = asamplestdev(inarray, dimension, keepdims) / N.sqrt(n-1)
return s
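# Worked contrast (illustrative, not in the original source): for
#   a = N.array([2., 4., 4., 4., 5., 5., 7., 9.])
# the mean is 5.0 and the sum of squared deviations is 32, so
#   asamplevar(a)   -> 32/8 = 4.0    (divides by N)
#   avar(a)         -> 32/7 ~ 4.571  (divides by N-1)
#   asamplestdev(a) -> 2.0 and astdev(a) ~ 2.138
# asem() divides the N-based stdev by sqrt(N-1), while asterr() divides the
# (N-1)-based stdev by sqrt(N).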
def az(a, score):
"""
    Returns the z-score of a given input score, given the array from which
that score came. Not appropriate for population calculations, nor for
arrays > 1D.
Usage: az(a, score)
"""
z = (score-amean(a)) / asamplestdev(a)
return z
def azs(a):
"""
Returns a 1D array of z-scores, one for each score in the passed array,
computed relative to the passed array.
Usage: azs(a)
"""
zscores = []
for item in a:
zscores.append(z(a, item))
return N.array(zscores)
def azmap(scores, compare, dimension=0):
"""
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0
of the compare array.
    Usage: azmap(scores, compare, dimension=0)
"""
mns = amean(compare, dimension)
sstd = asamplestdev(compare, 0)
return (scores - mns) / sstd
# ATRIMMING FUNCTIONS
def around(a, digits=1):
"""
Rounds all values in array a to 'digits' decimal places.
Usage: around(a,digits)
Returns: a, where each value is rounded to 'digits' decimals
"""
def ar(x, d=digits):
return round(x, d)
if not isinstance(a, N.ArrayType):
try:
a = N.array(a)
except Exception:
a = N.array(a, 'O')
shp = a.shape
if a.typecode() in ['f', 'F', 'd', 'D']:
b = N.ravel(a)
b = N.array([ar(_) for _ in b])
b.shape = shp
elif a.typecode() in ['o', 'O']:
b = N.ravel(a)*1
for i in range(len(b)):
if isinstance(b[i], float):
b[i] = round(b[i], digits)
b.shape = shp
else: # not a float, double or Object array
b = a*1
return b
def athreshold(a, threshmin=None, threshmax=None, newval=0):
"""
    Like Numeric.clip() except that values < threshmin or > threshmax are
    replaced by newval instead of by threshmin/threshmax (respectively).
    Usage: athreshold(a,threshmin=None,threshmax=None,newval=0)
    Returns: a, with values < threshmin or > threshmax replaced with newval
"""
mask = N.zeros(a.shape)
if threshmin is not None:
mask = mask + N.where(N.less(a, threshmin), 1, 0)
if threshmax is not None:
mask = mask + N.where(N.greater(a, threshmax), 1, 0)
mask = N.clip(mask, 0, 1)
return N.where(mask, newval, a)
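# Illustrative sketch (not from the original source): athreshold() replaces
# out-of-range values with a single fill value, whereas Numeric.clip() pins
# them to the limits:
#   a = N.array([1, 5, 8, 12])
#   athreshold(a, threshmin=3, threshmax=10, newval=0)   # -> [0, 5, 8, 0]
#   N.clip(a, 3, 10)                                      # -> [3, 5, 8, 10]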
def atrimboth(a, proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores). You must pre-sort the array if you want
"proper" trimming. Slices off LESS if proportion results in a
non-integer slice index (i.e., conservatively slices off
proportiontocut).
Usage: atrimboth (a,proportiontocut)
Returns: trimmed version of array a
"""
lowercut = int(proportiontocut*len(a))
uppercut = len(a) - lowercut
return a[lowercut:uppercut]
def atrim1(a, proportiontocut, tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: atrim1(a,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of array a
"""
    if tail.lower() == 'right':
lowercut = 0
uppercut = len(a) - int(proportiontocut*len(a))
    elif tail.lower() == 'left':
lowercut = int(proportiontocut*len(a))
uppercut = len(a)
return a[lowercut:uppercut]
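# Illustrative sketch (not in the original source): trimming is conservative,
# so a proportion that does not divide the length evenly drops fewer points:
#   a = N.sort(N.array([9., 1., 7., 3., 5., 2., 8.]))   # pre-sort as required
#   atrimboth(a, 0.2)            # int(0.2*7) = 1 -> drops 1 value from each end
#   atrim1(a, 0.2, tail='left')  # drops only the single smallest value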
# ACORRELATION FUNCTIONS
def acovariance(X):
"""
Computes the covariance matrix of a matrix X. Requires a 2D matrix input.
Usage: acovariance(X)
Returns: covariance matrix of X
"""
if len(X.shape) != 2:
raise TypeError("acovariance requires 2D matrices")
n = X.shape[0]
mX = amean(X, 0)
return N.dot(N.transpose(X), X) / float(n) - N.multiply.outer(mX, mX)
def acorrelation(X):
"""
Computes the correlation matrix of a matrix X. Requires a 2D matrix input.
Usage: acorrelation(X)
Returns: correlation matrix of X
"""
C = acovariance(X)
V = N.diagonal(C)
return C / N.sqrt(N.multiply.outer(V, V))
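# Illustrative sketch (not part of the original module): acovariance() treats
# rows as observations and columns as variables, and acorrelation() rescales
# the covariance by the outer product of the per-column standard deviations:
#   X = N.array([[1., 2.], [2., 4.], [3., 6.]])   # column 2 = 2 * column 1
#   acovariance(X)    # 2x2; off-diagonal entries = 2 * variance of column 1
#   acorrelation(X)   # all entries 1.0, the columns are perfectly correlated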
def apaired(x, y):
"""
    Interactively determines the type of data in x and y, and then runs the
    appropriate statistic for paired group data.
Usage: apaired(x,y) x,y = the two arrays of values to be compared
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = input()
if samples in ['i', 'I', 'r', 'R']:
print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
r = obrientransform(x, y)
f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
if p < 0.05:
vartype = 'unequal, p='+str(round(p, 4))
else:
vartype = 'equal'
print(vartype)
if samples in ['i', 'I']:
if vartype[0] == 'e':
t, p = ttest_ind(x, y, None, 0)
print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
else:
if len(x) > 20 or len(y) > 20:
z, p = ranksums(x, y)
print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
else:
u, p = mannwhitneyu(x, y)
print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
else: # RELATED SAMPLES
if vartype[0] == 'e':
t, p = ttest_rel(x, y, 0)
print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
else:
                t, p = wilcoxont(x, y)
print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = input()
if corrtype in ['c', 'C']:
m, b, r, p, see = linregress(x, y)
print('\nLinear regression for continuous variables ...')
lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'], [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]]
pstat.printcc(lol)
elif corrtype in ['r', 'R']:
r, p = spearmanr(x, y)
print('\nCorrelation for ranked variables ...')
print("Spearman's r: ", round(r, 4), round(p, 4))
else: # DICHOTOMOUS
r, p = pointbiserialr(x, y)
print('\nAssuming x contains a dichotomous variable ...')
print('Point Biserial r: ', round(r, 4), round(p, 4))
print('\n\n')
return None
def apearsonr(x, y, verbose=1):
"""
Calculates a Pearson correlation coefficient and returns p. Taken
from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.
Usage: apearsonr(x,y,verbose=1) where x,y are equal length arrays
Returns: Pearson's r, two-tailed p-value
"""
TINY = 1.0e-20
n = len(x)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = (r_num / r_den)
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df, 0.5, df/(df+t*t), verbose)
return r, prob
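# Note on the p-value above (illustrative, not from the original source): the
# two-tailed probability comes from the t distribution via the incomplete
# beta function,
#   t = r * sqrt(df / ((1-r)*(1+r))),  df = n - 2
#   prob = abetai(0.5*df, 0.5, df/(df + t*t))
# which uses the standard identity P(|T| > t) = I_{df/(df+t^2)}(df/2, 1/2).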
def aspearmanr(x, y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: aspearmanr(x,y) where x,y are equal-length arrays
Returns: Spearman's r, two-tailed p-value
"""
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = N.add.reduce((rankx-ranky)**2)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = abetai(0.5*df, 0.5, df/(df+t*t))
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipes, p.510. They are close to the tables, but not exact. (?)
return rs, probrs
def apointbiserialr(x, y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: apointbiserialr(x,y) where x,y are equal length arrays
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
categories = pstat.aunique(x)
data = pstat.aabut(x, y)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().")
else: # there are 2 categories, continue
codemap = pstat.aabut(categories, N.arange(2))
pstat.arecode(data, codemap, 0) # recoded
x = pstat.alinexand(data, 0, categories[0])
y = pstat.alinexand(data, 0, categories[1])
xmean = amean(pstat.acolex(x, 1))
ymean = amean(pstat.acolex(y, 1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data, 1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = abetai(0.5*df, 0.5, df/(df+t*t))
return rpb, prob
def akendalltau(x, y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
    from function kendl1 in Numerical Recipes. Needs good test-cases.@@@
Usage: akendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
for k in range(j, len(y)):
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither array has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss - 1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def alinregress(*args):
"""
Calculates a regression line on two arrays, x and y, corresponding to x,y
pairs. If a single 2D array is passed, alinregress finds dim with 2 levels
and splits data into x,y pairs along that dim.
Usage: alinregress(*args) args=2 equal-length arrays, or one 2D array
Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
args = args[0]
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:, 0]
y = args[:, 1]
else:
x = args[0]
y = args[1]
n = len(x)
xmean = amean(x)
ymean = amean(y)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = r_num / r_den
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df, 0.5, df/(df+t*t))
slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*asamplestdev(y)
return slope, intercept, r, prob, sterrest
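# Illustrative sketch (not in the original source): perfectly linear input
# recovers its slope and intercept exactly, with r = 1 and ~zero estimate error:
#   x = N.array([0., 1., 2., 3.])
#   y = 2.0*x + 1.0
#   slope, intercept, r, prob, sterrest = alinregress(x, y)
#   # slope ~ 2.0, intercept ~ 1.0, r ~ 1.0, sterrest ~ 0.0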
# AINFERENTIAL STATISTICS
def attest_1samp(a, popmean, printit=0, name='Sample', writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
    Usage: attest_1samp(a,popmean,printit=0,name='Sample',writemode='a')
Returns: t-value, two-tailed prob
"""
if not isinstance(a, N.ArrayType):
a = N.array(a)
x = amean(a)
v = avar(a)
n = len(a)
df = n-1
svar = ((n-1)*v) / float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = abetai(0.5*df, 0.5, df/(df+t*t))
if printit != 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit, writemode,
'Population', '--', popmean, 0, 0, 0,
name, n, x, v, N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
statname, t, prob)
return t, prob
def attest_ind(a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
    a, and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_ind (a,b,dimension=None,printit=0,
Name1='Samp1',Name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
x1 = amean(a, dimension)
x2 = amean(b, dimension)
v1 = avar(a, dimension)
v2 = avar(b, dimension)
n1 = a.shape[dimension]
n2 = b.shape[dimension]
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
zerodivproblem = N.equal(svar, 0)
svar = N.where(zerodivproblem, 1, svar) # avoid zero-division in 1st place
t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df, 0.5, float(df)/(df+t*t))
if isinstance(t, N.ArrayType):
probs = N.reshape(probs, t.shape)
if len(probs) == 1:
probs = probs[0]
if printit != 0:
if isinstance(t, N.ArrayType):
t = t[0]
if isinstance(probs, N.ArrayType):
probs = probs[0]
statname = 'Independent samples T-test.'
outputpairedstats(printit, writemode,
name1, n1, x1, v1, N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2, n2, x2, v2, N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname, t, probs)
return
return t, probs
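# Illustrative sketch (not part of the original module): with a dimension
# given, attest_ind() works column-wise and returns one t/p pair per column:
#   a = N.array([[1., 10.], [2., 11.], [3., 12.]])
#   b = N.array([[4., 10.], [5., 11.], [6., 12.]])
#   t, p = attest_ind(a, b, dimension=0)
#   # t and p each have length 2: column 0 compares [1,2,3] with [4,5,6]
#   # (a real difference), while column 1 compares identical samples,
#   # giving t ~ 0 and p ~ 1 there.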
def attest_rel(a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores, a
    and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_rel(a,b,dimension=None,printit=0,
name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
if len(a) != len(b):
raise ValueError('Unequal length arrays.')
x1 = amean(a, dimension)
x2 = amean(b, dimension)
v1 = avar(a, dimension)
v2 = avar(b, dimension)
n = a.shape[dimension]
df = float(n-1)
d = (a-b).astype('d')
denom = N.sqrt((n*N.add.reduce(d*d, dimension) - N.add.reduce(d, dimension)**2) / df)
zerodivproblem = N.equal(denom, 0)
denom = N.where(zerodivproblem, 1, denom) # avoid zero-division in 1st place
t = N.add.reduce(d, dimension) / denom # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df, 0.5, float(df)/(df+t*t))
if isinstance(t, N.ArrayType):
probs = N.reshape(probs, t.shape)
if len(probs) == 1:
probs = probs[0]
if printit != 0:
statname = 'Related samples T-test.'
outputpairedstats(printit, writemode,
name1, n, x1, v1, N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2, n, x2, v2, N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname, t, probs)
return
return t, probs
def achisquare(f_obs, f_exp=None):
"""
Calculates a one-way chi square for array of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: achisquare(f_obs, f_exp=None) f_obs = array of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs)
if f_exp is None:
f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs), N.Float)
f_exp = f_exp.astype(N.Float)
chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
return chisq, chisqprob(chisq, k-1)
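# Worked sketch (illustrative, not from the original source): with no expected
# frequencies the total is split evenly, so for
#   f_obs = N.array([10., 20., 30.])
# the expected counts are [20, 20, 20] and
#   chisq = (10-20)**2/20 + (20-20)**2/20 + (30-20)**2/20 = 10.0
# with k-1 = 2 degrees of freedom, achisquare(f_obs) returns (10.0, p).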
def aks_2samp(data1, data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples. Modified from
    Numerical Recipes in C, page 493. Returns KS D-value, prob. Not ufunc-
    like.
Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays
Returns: KS D-value, p-value
"""
j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
j2 = 0 # N.zeros(data2.shape[1:])
fn1 = 0.0 # N.zeros(data1.shape[1:],N.Float)
fn2 = 0.0 # N.zeros(data2.shape[1:],N.Float)
n1 = data1.shape[0]
n2 = data2.shape[0]
en1 = n1*1
en2 = n2*1
d = N.zeros(data1.shape[1:], N.Float)
data1 = N.sort(data1, 0)
data2 = N.sort(data2, 0)
while j1 < n1 and j2 < n2:
d1 = data1[j1]
d2 = data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if abs(dt) > abs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
except Exception:
prob = 1.0
return d, prob
def amannwhitneyu(x, y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(N.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1, u2)
smallu = min(u1, u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in amannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
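# Illustrative note (not in the original source): the returned probability is
# one-tailed and comes from a normal approximation of U. For
#   x = N.array([1., 2., 3., 4.]) and y = N.array([10., 11., 12., 13.])
# every x ranks below every y, so u1 = 16 + 10 - 10 = 16 and u2 = 0; the
# function returns smallu = 0 together with 1 - zprob(z) for z = |16 - 8| / sd.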
def atiecorrect(rankvals):
"""
Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c
code.
Usage: atiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted, posn = ashellsort(N.array(rankvals))
n = len(sorted)
T = 0.0
i = 0
while (i < n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i < n-1) and (sorted[i] == sorted[i+1]):
nties = nties + 1
i = i + 1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
def aranksums(x, y):
"""
Calculates the rank sums statistic on the provided scores and returns
the result.
Usage: aranksums(x,y) where x,y are arrays of values for 2 conditions
Returns: z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = N.concatenate((x, y))
ranked = arankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 - zprob(abs(z)))
return z, prob
def awilcoxont(x, y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) != len(y):
raise ValueError('Unequal N in awilcoxont. Aborting.')
d = x-y
d = N.compress(N.not_equal(d, 0), d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 - zprob(abs(z)))
return wt, prob
def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H and associated p-value for 3 or more
independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
    assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()"
args = list(args)
n = [0]*len(args)
n = [len(_) for _ in args]
all = []
for i in range(len(args)):
all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in akruskalwallish')
h = h / float(T)
return h, chisqprob(h, df)
def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
    probability value. It assumes 3 or more repeated measures. With only 3
    levels, a minimum of 10 subjects is required in the study; four levels
    requires at least 5 subjects per level (??).
Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
data = pstat.aabut(*args)
data = data.astype(N.Float)
for i in range(len(data)):
data[i] = arankdata(data[i])
ssbn = asum(asum(args, 1)**2)
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq, k-1)
# APROBABILITY CALCULATIONS
def achisqprob(chisq, df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
def ex(x):
BIG = 200.0
exponents = N.where(N.less(x, -BIG), -BIG, x)
return N.exp(exponents)
if not isinstance(chisq, N.ArrayType):
chisq = N.array([chisq])
if df < 1:
        return N.ones(chisq.shape, N.Float)
probs = N.zeros(chisq.shape, N.Float)
probs = N.where(N.less_equal(chisq, 0), 1.0, probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
y = ex(-a)
if df % 2 == 0:
even = 1
s = y*1
s2 = s*1
else:
even = 0
s = 2.0 * azprob(-N.sqrt(chisq))
s2 = s*1
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = N.ones(probs.shape, N.Float)
else:
z = 0.5 * N.ones(probs.shape, N.Float)
if even:
e = N.zeros(probs.shape, N.Float)
else:
e = N.log(N.sqrt(N.pi)) * N.ones(probs.shape, N.Float)
c = N.log(a)
mask = N.zeros(probs.shape)
a_big = N.greater(a, BIG)
a_big_frozen = -1 * N.ones(probs.shape, N.Float)
totalelements = N.multiply.reduce(N.array(probs.shape))
while asum(mask) != totalelements:
e = N.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
# print z, e, s
newmask = N.greater(z, chisq)
a_big_frozen = N.where(newmask*N.equal(mask, 0)*a_big, s, a_big_frozen)
mask = N.clip(newmask+mask, 0, 1)
if even:
z = N.ones(probs.shape, N.Float)
e = N.ones(probs.shape, N.Float)
else:
z = 0.5 * N.ones(probs.shape, N.Float)
e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape, N.Float)
c = 0.0
mask = N.zeros(probs.shape)
a_notbig_frozen = -1 * N.ones(probs.shape, N.Float)
while asum(mask) != totalelements:
e = e * (a/z.astype(N.Float))
c = c + e
z = z + 1.0
# print '#2', z, e, c, s, c*y+s2
newmask = N.greater(z, chisq)
a_notbig_frozen = N.where(newmask*N.equal(mask, 0)*(1-a_big),
c*y+s2, a_notbig_frozen)
mask = N.clip(newmask+mask, 0, 1)
probs = N.where(N.equal(probs, 1), 1,
N.where(N.greater(a, BIG), a_big_frozen, a_notbig_frozen))
return probs
else:
return s
def aerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional error
    everywhere less than 1.2e-7. Adapted from Numerical Recipes. Can
handle multiple dimensions.
Usage: aerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
return N.where(N.greater_equal(x, 0), ans, 2.0-ans)
def azprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: azprob(z) where z is a z-value
"""
def yfunc(y):
x = (((((((((((((-0.000045255659 * y
+ 0.000152529290) * y - 0.000019538132) * y
- 0.000676904986) * y + 0.001390604284) * y
- 0.000794620820) * y - 0.002034254874) * y
+ 0.006549791214) * y - 0.010557625006) * y
+ 0.011630447319) * y - 0.009279453341) * y
+ 0.005353579108) * y - 0.002141268741) * y
+ 0.000535310849) * y + 0.999936657524
return x
def wfunc(w):
x = ((((((((0.000124818987 * w
- 0.001075204047) * w + 0.005198775019) * w
- 0.019198292004) * w + 0.059054035642) * w
- 0.151968751364) * w + 0.319152932694) * w
- 0.531923007300) * w + 0.797884560593) * N.sqrt(w) * 2.0
return x
Z_MAX = 6.0 # maximum meaningful z-value
x = N.zeros(z.shape, N.Float) # initialize
y = 0.5 * N.fabs(z)
x = N.where(N.less(y, 1.0), wfunc(y*y), yfunc(y-2.0)) # get x's
x = N.where(N.greater(y, Z_MAX*0.5), 1.0, x) # kill those with big Z
prob = N.where(N.greater(z, 0), (x+1)*0.5, (1-x)*0.5)
return prob
def aksprob(alam):
"""
Returns the probability value for a K-S statistic computed via ks_2samp.
    Adapted from Numerical Recipes. Can handle multiple dimensions.
Usage: aksprob(alam)
"""
if isinstance(alam, N.ArrayType):
frozen = -1 * N.ones(alam.shape, N.Float64)
alam = alam.astype(N.Float64)
arrayflag = 1
    else:
        frozen = N.array(-1.)
        alam = N.array(alam, N.Float64)
        arrayflag = 0  # scalar input; return a scalar at the end
mask = N.zeros(alam.shape)
fac = 2.0 * N.ones(alam.shape, N.Float)
sum = N.zeros(alam.shape, N.Float)
termbf = N.zeros(alam.shape, N.Float)
a2 = N.array(-2.0*alam*alam, N.Float64)
totalelements = N.multiply.reduce(N.array(mask.shape))
for j in range(1, 201):
if asum(mask) == totalelements:
break
exponents = (a2*j*j)
overflowmask = N.less(exponents, -746)
frozen = N.where(overflowmask, 0, frozen)
mask = mask+overflowmask
term = fac*N.exp(exponents)
sum = sum + term
newmask = N.where(N.less_equal(abs(term), (0.001*termbf))
+ N.less(abs(term), 1.0e-8*sum), 1, 0)
frozen = N.where(newmask*N.equal(mask, 0), sum, frozen)
mask = N.clip(mask+newmask, 0, 1)
fac = -fac
termbf = abs(term)
if arrayflag:
return N.where(N.equal(frozen, -1), 1.0, frozen) # 1.0 if doesn't converge
else:
return N.where(N.equal(frozen, -1), 1.0, frozen)[0] # 1.0 if doesn't converge
def afprob(dfnum, dfden, F):
"""
Returns the 1-tailed significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
of freedom for the denominator (dfF). Can handle multiple dims for F.
Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
if isinstance(F, N.ArrayType):
return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))
else:
return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def abetacf(a, b, x, verbose=1):
"""
Evaluates the continued fraction form of the incomplete Beta function,
    betai. (Adapted from: Numerical Recipes in C.) Can handle multiple
dimensions for x.
Usage: abetacf(a,b,x,verbose=1)
"""
ITMAX = 200
EPS = 3.0e-7
arrayflag = 1
if isinstance(x, N.ArrayType):
frozen = N.ones(x.shape, N.Float) * -1 # start out w/ -1s, should replace all
else:
arrayflag = 0
frozen = N.array([-1])
x = N.array([x])
mask = N.zeros(x.shape)
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
if N.sum(N.ravel(N.equal(frozen, -1))) == 0:
break
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az*1
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
newmask = N.less(abs(az-aold), EPS*abs(az))
frozen = N.where(newmask*N.equal(mask, 0), az, frozen)
mask = N.clip(mask+newmask, 0, 1)
noconverge = asum(N.equal(frozen, -1))
if noconverge != 0 and verbose:
print('a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements')
if arrayflag:
return frozen
else:
return frozen[0]
def agammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    Adapted from: Numerical Recipes in C. Can handle multiple dims ... but
probably doesn't normally have to.
Usage: agammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*N.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + N.log(2.50662827465*ser)
def abetai(a, b, x, verbose=1):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented
    here, using the betacf function. (Adapted from: Numerical Recipes in
C.) Can handle multiple dimensions.
Usage: abetai(a,b,x,verbose=1)
"""
TINY = 1e-15
if isinstance(a, N.ArrayType):
if asum(N.less(x, 0)+N.greater(x, 1)) != 0:
raise ValueError('Bad x in abetai')
x = N.where(N.equal(x, 0), TINY, x)
x = N.where(N.equal(x, 1.0), 1-TINY, x)
bt = N.where(N.equal(x, 0)+N.equal(x, 1), 0, -1)
exponents = (gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b * N.log(1.0-x))
# 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
exponents = N.where(N.less(exponents, -740), -740, exponents)
bt = N.exp(exponents)
if isinstance(x, N.ArrayType):
ans = N.where(N.less(x, (a+1)/(a+b+2.0)),
bt*abetacf(a, b, x, verbose)/float(a),
1.0-bt*abetacf(b, a, 1.0-x, verbose)/float(b))
else:
if x < (a+1)/(a+b+2.0):
ans = bt*abetacf(a, b, x, verbose)/float(a)
else:
ans = 1.0-bt*abetacf(b, a, 1.0-x, verbose)/float(b)
return ans
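# Usage note (illustrative, not part of the original module): abetai() is the
# workhorse behind most p-values in this file. For a t statistic with df
# degrees of freedom the two-tailed probability used throughout is
#   abetai(0.5*df, 0.5, df/(df + t*t))
# and for an F statistic, afprob() evaluates
#   abetai(0.5*dfden, 0.5*dfnum, dfden/(dfden + dfnum*F)).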
# AANOVA CALCULATIONS
import LinearAlgebra
LA = LinearAlgebra
def aglm(data, para):
"""
Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
from:
Peterson et al. Statistical limitations in functional neuroimaging
I. Non-inferential methods and statistical models. Phil Trans Royal Soc
Lond B 354: 1239-1260.
Usage: aglm(data,para)
Returns: statistic, p-value ???
"""
if len(para) != len(data):
print("data and para must be same length in aglm")
return
n = len(para)
p = pstat.aunique(para)
x = N.zeros((n, len(p))) # design matrix
for l in range(len(p)):
x[:, l] = N.equal(para, p[l])
b = N.dot(N.dot(LA.inverse(N.dot(N.transpose(x), x)), # i.e., b=inv(X'X)X'Y
N.transpose(x)),
data)
diffs = (data - N.dot(x, b))
s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)
if len(p) == 2: # ttest_ind
c = N.array([1, -1])
df = n-2
fact = asum(1.0/asum(x, 0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
t = N.dot(c, b) / N.sqrt(s_sq*fact)
probs = abetai(0.5*df, 0.5, float(df)/(df+t*t))
return t, probs
def aF_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: aF_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
na = len(args) # ANOVA on 'na' groups, each in it's own array
alldata = []
alldata = N.concatenate(args)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for a in args:
ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = na-1
dfwn = bign - na
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn, dfwn, f)
return f, prob
def aF_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
    dfR = degrees of freedom associated with the Restricted model
    dfF = degrees of freedom associated with the Full model
"""
return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF)))
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
Enum = round(Enum, 3)
Eden = round(Eden, 3)
    dfnum = round(dfnum, 3)
dfden = round(dfden, 3)
f = round(f, 3)
prob = round(prob, 3)
suffix = '' # for *s after the p-value
if prob < 0.001:
suffix = ' ***'
elif prob < 0.01:
suffix = ' **'
elif prob < 0.05:
suffix = ' *'
title = [['EF/ER', 'DF', 'Mean Square', 'F-value', 'prob', '']]
lofl = title+[[Enum, dfnum, round(Enum/float(dfnum), 3), f, prob, suffix],
[Eden, dfden, round(Eden/float(dfden), 3), '', '', '']]
pstat.printcc(lofl)
return
def F_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
    dfnum = numerator degrees of freedom (dfR - dfF)
    dfden = denominator degrees of freedom (dfF, i.e. the Full model)
where ER and EF are matrices from a multivariate F calculation.
"""
if type(ER) in [int, float]:
ER = N.array([[ER]])
if type(EF) in [int, float]:
EF = N.array([[EF]])
n_um = (LA.determinant(ER) - LA.determinant(EF)) / float(dfnum)
d_en = LA.determinant(EF) / float(dfden)
return n_um / d_en
# ASUPPORT FUNCTIONS
def asign(a):
"""
Usage: asign(a)
Returns: array shape of a, with -1 where a<0 and +1 where a>=0
"""
a = N.asarray(a)
if ((isinstance(a, type(1.4))) or (isinstance(a, type(1)))):
return a-a-N.less(a, 0)+N.greater(a, 0)
else:
return N.zeros(N.shape(a))-N.less(a, 0)+N.greater(a, 0)
def asum(a, dimension=None, keepdims=0):
"""
An alternative to the Numeric.add.reduce function, which allows one to
    (1) collapse over multiple dimensions at once, and/or (2) to retain
    all dimensions in the original array (squashing collapsed ones down to size 1).
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the resulting array will have as many
dimensions as the input array.
Usage: asum(a, dimension=None, keepdims=0)
Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
"""
if isinstance(a, N.ArrayType) and a.typecode() in ['l', 's', 'b']:
a = a.astype(N.Float)
if dimension is None:
s = N.sum(N.ravel(a))
elif type(dimension) in [int, float]:
s = N.add.reduce(a, dimension)
if keepdims == 1:
shp = list(a.shape)
shp[dimension] = 1
s = N.reshape(s, shp)
else: # must be a SEQUENCE of dims to sum over
dims = sorted(dimension)
dims.reverse()
s = a * 1.0
for dim in dims:
s = N.add.reduce(s, dim)
if keepdims == 1:
shp = list(a.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s, shp)
return s
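# Illustrative sketch (not in the original source) of the keepdims behaviour
# described above:
#   a = N.array([[1, 2, 3], [4, 5, 6]])
#   asum(a, 1)               # -> [6., 15.]        (shape (2,))
#   asum(a, 1, keepdims=1)   # -> [[6.], [15.]]    (shape (2, 1))
#   asum(a, [0, 1])          # -> 21.0             (collapse both dimensions)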
def acumsum(a, dimension=None):
"""
Returns an array consisting of the cumulative sum of the items in the
passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions, but this last one just barely makes sense).
Usage: acumsum(a,dimension=None)
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
if type(dimension) in [list, tuple, N.ArrayType]:
dimension = sorted(dimension)
dimension.reverse()
for d in dimension:
a = N.add.accumulate(a, d)
return a
else:
return N.add.accumulate(a, dimension)
def ass(inarray, dimension=None, keepdims=0):
"""
Squares each value in the passed array, adds these squares & returns
the result. Unfortunate function name. :-) Defaults to ALL values in
the array. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions). Set keepdims=1 to maintain the original number
of dimensions.
Usage: ass(inarray, dimension=None, keepdims=0)
Returns: sum-along-'dimension' for (inarray*inarray)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
return asum(inarray*inarray, dimension, keepdims)
def asummult(array1, array2, dimension=None, keepdims=0):
"""
Multiplies elements in array1 and array2, element by element, and
returns the sum (along 'dimension') of all resulting multiplications.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). A trivial function, but included for completeness.
Usage: asummult(array1,array2,dimension=None,keepdims=0)
"""
if dimension is None:
array1 = N.ravel(array1)
array2 = N.ravel(array2)
dimension = 0
return asum(array1*array2, dimension, keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
"""
Adds the values in the passed array, squares that sum, and returns the
result. Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the returned array will have the same
NUMBER of dimensions as the original.
Usage: asquare_of_sums(inarray, dimension=None, keepdims=0)
Returns: the square of the sum over dim(s) in dimension
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
s = asum(inarray, dimension, keepdims)
if isinstance(s, N.ArrayType):
return s.astype(N.Float)*s
else:
return float(s)*s
def asumdiffsquared(a, b, dimension=None, keepdims=0):
"""
Takes pairwise differences of the values in arrays a and b, squares
these differences, and returns the sum of these squares. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
keepdims=1 means the return shape = len(a.shape) = len(b.shape)
Usage: asumdiffsquared(a,b)
Returns: sum[ravel(a-b)**2]
"""
if dimension is None:
N.ravel(a) # inarray
dimension = 0
return asum((a-b)**2, dimension, keepdims)
def ashellsort(inarray):
"""
Shellsort algorithm. Sorts a 1D-array.
Usage: ashellsort(inarray)
Returns: sorted-inarray, sorting-index-vector (for original array)
"""
n = len(inarray)
svec = inarray * 1.0
ivec = list(range(n))
    gap = n // 2  # integer division needed
while gap > 0:
for i in range(gap, n):
for j in range(i-gap, -1, -gap):
while j >= 0 and svec[j] > svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
        gap = gap // 2  # integer division needed
# svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def arankdata(inarray):
"""
    Ranks the data in inarray, dealing with ties appropriately. Assumes
a 1D inarray. Adapted from Gary Perlman's |Stat ranksort.
Usage: arankdata(inarray)
Returns: array of length equal to inarray, containing rank scores
"""
n = len(inarray)
svec, ivec = ashellsort(inarray)
sumranks = 0
dupcount = 0
newarray = N.zeros(n, N.Float)
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i == n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1, i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
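# Illustrative sketch (not from the original source): tied values receive the
# mean of the ranks they occupy,
#   arankdata(N.array([40., 10., 30., 10.]))   # -> [4.0, 1.5, 3.0, 1.5]
# which is exactly what atiecorrect() later adjusts for in the U and H tests.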
def afindwithin(data):
"""
Returns a binary vector, 1=within-subject factor, 0=between. Input
equals the entire data array (i.e., column 1=random factor, last
    column = measured values).
Usage: afindwithin(data) data in |Stat format
"""
numfact = len(data[0])-2
withinvec = [0]*numfact
for col in range(1, numfact+1):
rows = pstat.linexand(data, col, pstat.unique(pstat.colex(data, 1))[0]) # get 1 level of this factor
if len(pstat.unique(pstat.colex(rows, 0))) < len(rows): # if fewer subjects than scores on this factor
withinvec[col-1] = 1
return withinvec
# RE-DEFINE DISPATCHES TO INCLUDE ARRAYS
# CENTRAL TENDENCY:
geometricmean = Dispatch((lgeometricmean, (list, tuple)), (ageometricmean, (N.ArrayType,)))
harmonicmean = Dispatch((lharmonicmean, (list, tuple)), (aharmonicmean, (N.ArrayType,)))
mean = Dispatch((lmean, (list, tuple)), (amean, (N.ArrayType,)))
median = Dispatch((lmedian, (list, tuple)), (amedian, (N.ArrayType,)))
medianscore = Dispatch((lmedianscore, (list, tuple)), (amedianscore, (N.ArrayType,)))
mode = Dispatch((lmode, (list, tuple)), (amode, (N.ArrayType,)))
tmean = Dispatch((atmean, (N.ArrayType,)))
tvar = Dispatch((atvar, (N.ArrayType,)))
tstdev = Dispatch((atstdev, (N.ArrayType,)))
tsem = Dispatch((atsem, (N.ArrayType,)))
# VARIATION:
moment = Dispatch((lmoment, (list, tuple)), (amoment, (N.ArrayType,)))
variation = Dispatch((lvariation, (list, tuple)), (avariation, (N.ArrayType,)))
skew = Dispatch((lskew, (list, tuple)), (askew, (N.ArrayType,)))
kurtosis = Dispatch((lkurtosis, (list, tuple)), (akurtosis, (N.ArrayType,)))
describe = Dispatch((ldescribe, (list, tuple)), (adescribe, (N.ArrayType,)))
# DISTRIBUTION TESTS
skewtest = Dispatch((askewtest, (list, tuple)), (askewtest, (N.ArrayType,)))
kurtosistest = Dispatch((akurtosistest, (list, tuple)), (akurtosistest, (N.ArrayType,)))
normaltest = Dispatch((anormaltest, (list, tuple)), (anormaltest, (N.ArrayType,)))
# FREQUENCY STATS:
itemfreq = Dispatch((litemfreq, (list, tuple)), (aitemfreq, (N.ArrayType,)))
scoreatpercentile = Dispatch((lscoreatpercentile, (list, tuple)), (ascoreatpercentile, (N.ArrayType,)))
percentileofscore = Dispatch((lpercentileofscore, (list, tuple)), (apercentileofscore, (N.ArrayType,)))
histogram = Dispatch((lhistogram, (list, tuple)), (ahistogram, (N.ArrayType,)))
cumfreq = Dispatch((lcumfreq, (list, tuple)), (acumfreq, (N.ArrayType,)))
relfreq = Dispatch((lrelfreq, (list, tuple)), (arelfreq, (N.ArrayType,)))
# VARIABILITY:
obrientransform = Dispatch((lobrientransform, (list, tuple)), (aobrientransform, (N.ArrayType,)))
samplevar = Dispatch((lsamplevar, (list, tuple)), (asamplevar, (N.ArrayType,)))
samplestdev = Dispatch((lsamplestdev, (list, tuple)), (asamplestdev, (N.ArrayType,)))
signaltonoise = Dispatch((asignaltonoise, (N.ArrayType,)),)
var = Dispatch((lvar, (list, tuple)), (avar, (N.ArrayType,)))
stdev = Dispatch((lstdev, (list, tuple)), (astdev, (N.ArrayType,)))
sterr = Dispatch((lsterr, (list, tuple)), (asterr, (N.ArrayType,)))
sem = Dispatch((lsem, (list, tuple)), (asem, (N.ArrayType,)))
z = Dispatch((lz, (list, tuple)), (az, (N.ArrayType,)))
zs = Dispatch((lzs, (list, tuple)), (azs, (N.ArrayType,)))
# TRIMMING FCNS:
threshold = Dispatch((athreshold, (N.ArrayType,)),)
trimboth = Dispatch((ltrimboth, (list, tuple)), (atrimboth, (N.ArrayType,)))
trim1 = Dispatch((ltrim1, (list, tuple)), (atrim1, (N.ArrayType,)))
# CORRELATION FCNS:
paired = Dispatch((lpaired, (list, tuple)), (apaired, (N.ArrayType,)))
pearsonr = Dispatch((lpearsonr, (list, tuple)), (apearsonr, (N.ArrayType,)))
spearmanr = Dispatch((lspearmanr, (list, tuple)), (aspearmanr, (N.ArrayType,)))
pointbiserialr = Dispatch((lpointbiserialr, (list, tuple)), (apointbiserialr, (N.ArrayType,)))
kendalltau = Dispatch((lkendalltau, (list, tuple)), (akendalltau, (N.ArrayType,)))
linregress = Dispatch((llinregress, (list, tuple)), (alinregress, (N.ArrayType,)))
# INFERENTIAL STATS:
ttest_1samp = Dispatch((lttest_1samp, (list, tuple)), (attest_1samp, (N.ArrayType,)))
ttest_ind = Dispatch((lttest_ind, (list, tuple)), (attest_ind, (N.ArrayType,)))
ttest_rel = Dispatch((lttest_rel, (list, tuple)), (attest_rel, (N.ArrayType,)))
chisquare = Dispatch((lchisquare, (list, tuple)), (achisquare, (N.ArrayType,)))
ks_2samp = Dispatch((lks_2samp, (list, tuple)), (aks_2samp, (N.ArrayType,)))
mannwhitneyu = Dispatch((lmannwhitneyu, (list, tuple)), (amannwhitneyu, (N.ArrayType,)))
tiecorrect = Dispatch((ltiecorrect, (list, tuple)), (atiecorrect, (N.ArrayType,)))
ranksums = Dispatch((lranksums, (list, tuple)), (aranksums, (N.ArrayType,)))
wilcoxont = Dispatch((lwilcoxont, (list, tuple)), (awilcoxont, (N.ArrayType,)))
kruskalwallish = Dispatch((lkruskalwallish, (list, tuple)), (akruskalwallish, (N.ArrayType,)))
friedmanchisquare = Dispatch((lfriedmanchisquare, (list, tuple)), (afriedmanchisquare, (N.ArrayType,)))
# PROBABILITY CALCS:
chisqprob = Dispatch((lchisqprob, (int, float)), (achisqprob, (N.ArrayType,)))
zprob = Dispatch((lzprob, (int, float)), (azprob, (N.ArrayType,)))
ksprob = Dispatch((lksprob, (int, float)), (aksprob, (N.ArrayType,)))
fprob = Dispatch((lfprob, (int, float)), (afprob, (N.ArrayType,)))
betacf = Dispatch((lbetacf, (int, float)), (abetacf, (N.ArrayType,)))
betai = Dispatch((lbetai, (int, float)), (abetai, (N.ArrayType,)))
erfcc = Dispatch((lerfcc, (int, float)), (aerfcc, (N.ArrayType,)))
gammln = Dispatch((lgammln, (int, float)), (agammln, (N.ArrayType,)))
# ANOVA FUNCTIONS:
F_oneway = Dispatch((lF_oneway, (list, tuple)), (aF_oneway, (N.ArrayType,)))
F_value = Dispatch((lF_value, (list, tuple)), (aF_value, (N.ArrayType,)))
# SUPPORT FUNCTIONS:
incr = Dispatch((lincr, (list, tuple, N.ArrayType)), )
sum = Dispatch((lsum, (list, tuple)), (asum, (N.ArrayType,)))
cumsum = Dispatch((lcumsum, (list, tuple)), (acumsum, (N.ArrayType,)))
ss = Dispatch((lss, (list, tuple)), (ass, (N.ArrayType,)))
summult = Dispatch((lsummult, (list, tuple)), (asummult, (N.ArrayType,)))
square_of_sums = Dispatch((lsquare_of_sums, (list, tuple)), (asquare_of_sums, (N.ArrayType,)))
sumdiffsquared = Dispatch((lsumdiffsquared, (list, tuple)), (asumdiffsquared, (N.ArrayType,)))
shellsort = Dispatch((lshellsort, (list, tuple)), (ashellsort, (N.ArrayType,)))
rankdata = Dispatch((lrankdata, (list, tuple)), (arankdata, (N.ArrayType,)))
findwithin = Dispatch((lfindwithin, (list, tuple)), (afindwithin, (N.ArrayType,)))
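# Illustrative note (not part of the original module): the Dispatch wrappers
# above route one public name to either the list ('l*') or the Numeric-array
# ('a*') implementation based on the type of the first argument, so callers
# never pick a backend explicitly:
#   mean([1, 2, 3, 4])             # list input  -> lmean
#   mean(N.array([1., 2., 3.]))    # array input -> amean
# An argument type with no registered implementation is expected to raise a
# TypeError from the Dispatch class.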
# END OF NUMERIC FUNCTION BLOCK
# END OF STATISTICAL FUNCTIONS
except ImportError:
pass
# -*- coding: utf-8 -*-
#
# Copyright (C) <2010-2012> Gabriel Falcão
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce.core import STEP_REGISTRY
def _is_step_sentence(sentence):
return isinstance(sentence, str) or isinstance(sentence, basestring)
def step(step_func_or_sentence):
"""Decorates a function, so that it will become a new step
definition.
You give step sentence either (by priority):
* with step function argument (first example)
* with function doc (second example)
* with the function name exploded by underscores (third example)
Example::
>>> from lettuce import step
>>> from models import contact
>>>
>>> # First Example
    >>> @step(r'Given I delete the contact "(?P<name>.*)" from my address book')
... def given_i_do_something(step, name):
... contact.delete_by_name(name)
... assert step.sentence == 'Given I delete the contact "John Doe" from my address book'
...
>>> # Second Example
>>> @step
... def given_i_delete_a_contact_from_my_address_book(step, name):
    ... '''Given I delete the contact "(?P<name>.*)" from my address book'''
... contact.delete_by_name(name)
    ... assert step.sentence == 'Given I delete the contact "(?P<name>.*)" from my address book'
...
>>> # Third Example
>>> @step
... def given_I_delete_the_contact_John_Doe_from_my_address_book(step):
... contact.delete_by_name("John Doe")
... assert step.sentence == 'Given I delete the contact John Doe from my address book'
Notice that all step definitions take a step object as argument.
"""
if _is_step_sentence(step_func_or_sentence):
return lambda func: STEP_REGISTRY.load(step_func_or_sentence, func)
else:
return STEP_REGISTRY.load_func(step_func_or_sentence)
def steps(steps_class):
"""Decorates a class, and set steps definitions from methods
except those in the attribute "exclude" or starting by underscore.
Steps sentences are taken from methods names or docs if exist.
Example::
>>> from lettuce import steps
>>> from models import contact
>>>
>>> @steps
>>> class ListOfSteps(object):
... exclude = ["delete_by_name"]
...
... def __init__(self, contact):
... self.contact = contact
...
... def given_i_delete_a_contact_from_my_address_book(self, step, name):
    ... '''Given I delete the contact "(?P<name>.*)" from my address book'''
... self.delete_by_name(name)
    ... assert step.sentence == 'Given I delete the contact "(?P<name>.*)" from my address book'
...
... def given_I_delete_the_contact_John_Doe_from_my_address_book(self, step):
... self.delete_by_name("John Doe")
... assert step.sentence == 'Given I delete the contact John Doe from my address book'
...
... def delete_by_name(self, name):
... self.contact.delete_by_name(name)
...
>>> ListOfSteps(contact)
Notice steps are added when an object of the class is created.
"""
if hasattr(steps_class, '__init__'):
_init_ = getattr(steps_class, '__init__')
def init(self, *args, **kwargs):
_init_(self, *args, **kwargs)
STEP_REGISTRY.load_steps(self)
else:
def init(self, *args, **kwargs):
STEP_REGISTRY.load_steps(self)
setattr(steps_class, '__init__', init)
return steps_class
# At the time of this writing,
# 4XSLT generates a traceback when you do an apply-templates on a result tree
# fragment. It should generate a friendly (but equally fatal) error.
#
from Xml.Xslt import test_harness
sheet_1 = """\
Processing the root node of the fragment.
Processing the 'myElement' node of the fragment.
This element has
ancestor(s).
"""
expected_1 = """\
Processing the root node of the fragment. hello world
Processing the 'myElement' node of the fragment.
This element has 1 ancestor(s).
"""
sheet_2 = """\
Processing the 'myElement' node of the fragment.
"""
expected_2 = """\
Processing the 'myElement' node of the fragment.
"""
def Test(tester):
source = test_harness.FileInfo(string=sheet_1)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_1,
title='Case 1')
source = test_harness.FileInfo(string=sheet_2)
sheet = test_harness.FileInfo(string=sheet_2)
test_harness.XsltTest(tester, source, [sheet], expected_2,
title='Case 2')
return
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
from copy import deepcopy
from warnings import warn
from nibabel import load, funcs, Nifti1Image
import numpy as np
from scipy import signal
import scipy.io as sio
from nipype.external import six
from ..interfaces.base import (BaseInterface, traits, InputMultiPath,
OutputMultiPath, TraitedSpec, File,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import filename_to_list, save_json, split_filename
from ..utils.misc import find_indices
from .. import logging, config
iflogger = logging.getLogger('interface')
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
    params : np.array (up to 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
if source == 'NIPY':
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
#process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
[-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params):]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ('AFNI', 'FSFAST'):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
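# Minimal sketch (illustrative, not part of nipype): with all-zero motion
# parameters the function returns the identity, and a pure x-translation only
# populates the last column (np is already imported above):
#   eye = _get_affine_matrix(np.zeros(6), 'SPM')                    # ~ np.eye(4)
#   trans = _get_affine_matrix(np.array([2., 0., 0., 0., 0., 0.]), 'SPM')
#   # trans[0, 3] == 2.0; the rotation/scale/shear blocks stay identity.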
def _calc_norm(mc, use_differences, source, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
mc : motion parameter estimates
[3 translation, 3 rotation (radians)]
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
    norm : displacement norm at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
if brain_pts is None:
respos = np.diag([70, 70, 75])
resneg = np.diag([-70, -110, -45])
all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
displacement = None
else:
all_pts = brain_pts
n_pts = all_pts.size - all_pts.shape[1]
newpos = np.zeros((mc.shape[0], n_pts))
if brain_pts is not None:
        displacement = np.zeros((mc.shape[0], n_pts // 3))
for i in range(mc.shape[0]):
affine = _get_affine_matrix(mc[i, :], source)
newpos[i, :] = np.dot(affine,
all_pts)[0:3, :].ravel()
if brain_pts is not None:
displacement[i, :] = \
np.sqrt(np.sum(np.power(np.reshape(newpos[i, :],
(3, all_pts.shape[1])) -
all_pts[0:3, :],
2),
axis=0))
# np.savez('displacement.npz', newpos=newpos, pts=all_pts)
normdata = np.zeros(mc.shape[0])
if use_differences:
newpos = np.concatenate((np.zeros((1, n_pts)),
np.diff(newpos, n=1, axis=0)), axis=0)
for i in range(newpos.shape[0]):
normdata[i] = \
np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2),
(3, all_pts.shape[1])), axis=0)))
else:
newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
return normdata, displacement
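# Minimal sketch (illustrative, not part of nipype): for two time points that
# differ only by a 1 mm x-translation, _calc_norm() with use_differences=True
# reports the frame-to-frame displacement of the default cube midpoints:
#   mc = np.array([[0., 0., 0., 0., 0., 0.],
#                  [1., 0., 0., 0., 0., 0.]])
#   norm, disp = _calc_norm(mc, use_differences=True, source='SPM')
#   # norm[0] == 0.0 (first frame is the reference), norm[1] == 1.0, and
#   # disp is None because no brain_pts were supplied.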
def _nanmean(a, axis=None):
"""Return the mean excluding items that are nan
>>> a = [1, 2, np.nan]
>>> _nanmean(a)
1.5
"""
if axis:
return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis)
else:
return np.nansum(a) / np.sum(1 - np.isnan(a))