output_file("stocks_timeseries.html")
ts = TimeSeries(
xyvalues, index='Date', legend=True,
title="Timeseries", tools=TOOLS, ylabel='Stock Prices')
# usage with iterable index
#ts = TimeSeries(
# lxyvalues, index=lindex,
# title="timeseries, pd_input", ylabel='Stock Prices')
show(ts)
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from resource_management import Execute, File
from tempfile import mkstemp
import os
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and provides the same function set.
from resource_management.core.source import StaticFile
FILE_TYPE_XML = 'XML'
FILE_TYPE_PROPERTIES = 'PROPERTIES'
FILE_TYPE_JAAS_CONF = 'JAAS_CONF'
# The property name used by the hadoop credential provider
HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
# Copy JCEKS provider to service specific location and update the ACL
def update_credential_provider_path(config, config_type, dest_provider_path, file_owner, file_group, use_local_jceks=False):
"""
Copies the JCEKS file for the specified config from the default location to the given location,
and sets the ACLs for the specified owner and group. Also updates the config type's configuration
hadoop credential store provider with the copied file name.
  :param config: configurations['configurations'][config_type]
  :param config_type: The config type, e.g. hive-site, oozie-site, etc.
  :param dest_provider_path: The full path to which the JCEKS provider file is to be copied.
  :param file_owner: File owner
  :param file_group: File group
  :param use_local_jceks: If True, reference the copied provider with the 'localjceks://file' scheme instead of 'jceks://file'
  :return: A copy of the config that was modified, or the input config itself if nothing was modified.
"""
# Get the path to the provider .jceks
if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in config:
provider_path = config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME]
src_provider_path = provider_path[len('jceks://file'):]
File(dest_provider_path,
owner = file_owner,
group = file_group,
mode = 0640,
content = StaticFile(src_provider_path)
)
# make a copy of the config dictionary since it is read-only
config_copy = config.copy()
# overwrite the provider path with the path specified
if use_local_jceks:
config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'localjceks://file{0}'.format(dest_provider_path)
else:
config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file{0}'.format(dest_provider_path)
return config_copy
return config
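# Illustrative sketch only (not part of the original module): how a service script might
# call update_credential_provider_path(). The config values, paths, owner and group below
# are hypothetical placeholders; running it requires an Ambari resource_management environment.
def _example_update_credential_provider_path():
  hive_site = {
    'hadoop.security.credential.provider.path': 'jceks://file/etc/hive/conf/hive-site.jceks',
    'javax.jdo.option.ConnectionURL': 'jdbc:mysql://db-host/hive'
  }
  updated = update_credential_provider_path(
    hive_site, 'hive-site',
    dest_provider_path='/var/lib/hive/conf/hive-site.jceks',
    file_owner='hive', file_group='hadoop')
  # The returned dict is a copy whose provider path points at the copied file;
  # the input dict itself is left untouched.
  return updated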
def validate_security_config_properties(params, configuration_rules):
"""
Generic security configuration validation based on a set of rules and operations
:param params: The structure where the config parameters are held
:param configuration_rules: A structure containing rules and expectations,
Three types of checks are currently supported by this method:
1. value_checks - checks that a certain value must be set
2. empty_checks - checks that the property values must not be empty
3. read_checks - checks that the value represented by the property describes a readable file on the filesystem
:return: Issues found - should be empty if all is good
"""
issues = {}
for config_file, rule_sets in configuration_rules.iteritems():
# Each configuration rule set may have 0 or more of the following rule sets:
# - value_checks
# - empty_checks
# - read_checks
try:
# Each rule set has at least a list of relevant property names to check in some way
# The rule set for the operation of 'value_checks' is expected to be a dictionary of
# property names to expected values
actual_values = params[config_file] if config_file in params else {}
# Process Value Checks
# The rules are expected to be a dictionary of property names to expected values
rules = rule_sets['value_checks'] if 'value_checks' in rule_sets else None
if rules:
for property_name, expected_value in rules.iteritems():
actual_value = get_value(actual_values, property_name, '')
if actual_value != expected_value:
issues[config_file] = "Property %s contains an unexpected value. " \
"Expected/Actual: %s/%s" \
% (property_name, expected_value, actual_value)
# Process Empty Checks
# The rules are expected to be a list of property names that should not have empty values
rules = rule_sets['empty_checks'] if 'empty_checks' in rule_sets else None
if rules:
for property_name in rules:
actual_value = get_value(actual_values, property_name, '')
if not actual_value:
issues[config_file] = "Property %s must exist and must not be empty" % property_name
# Process Read Checks
# The rules are expected to be a list of property names that resolve to files names and must
# exist and be readable
rules = rule_sets['read_checks'] if 'read_checks' in rule_sets else None
if rules:
for property_name in rules:
actual_value = get_value(actual_values, property_name, None)
if not actual_value:
issues[config_file] = "Property %s does not exist" % property_name
elif not os.path.isfile(actual_value):
issues[config_file] = "Property %s points to an inaccessible file - %s" % (property_name, actual_value)
except Exception as e:
issues[config_file] = "Exception occurred while validating the config file\nCauses: %s" % str(e)
return issues
def build_expectations(config_file, value_checks, empty_checks, read_checks):
"""
  Helper method used to build the check expectations dict
  :return: a dict mapping the config_file name to its value_checks/empty_checks/read_checks rule sets
"""
configs_expectations = {}
configs_expectations[config_file] = {}
if value_checks:
configs_expectations[config_file]['value_checks'] = value_checks
if empty_checks:
configs_expectations[config_file]['empty_checks'] = empty_checks
if read_checks:
configs_expectations[config_file]['read_checks'] = read_checks
return configs_expectations
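# Illustrative sketch only: building a rule set with build_expectations() and checking
# actual parameters with validate_security_config_properties(). The hive-site property
# names and values below are hypothetical placeholders.
def _example_validate_security_config():
  expectations = build_expectations(
    'hive-site',
    {'hive.server2.authentication': 'KERBEROS'},
    ['hive.server2.authentication.kerberos.principal'],
    ['hive.server2.authentication.kerberos.keytab'])
  security_params = {
    'hive-site': {
      'hive.server2.authentication': 'KERBEROS',
      'hive.server2.authentication.kerberos.principal': 'hive/_HOST@EXAMPLE.COM',
      'hive.server2.authentication.kerberos.keytab': '/etc/security/keytabs/hive.service.keytab'
    }
  }
  # An empty dict means every rule passed; otherwise each entry describes the failure
  # (for the read check, the keytab path must exist and be a readable file).
  return validate_security_config_properties(security_params, expectations)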
def get_params_from_filesystem(conf_dir, config_files):
"""
Used to retrieve properties from xml config files and build a dict
  The dictionary of configuration files to file types should contain one of the following values:
  'XML'
  'PROPERTIES'
  'JAAS_CONF'
:param conf_dir: directory where the configuration files sit
:param config_files: dictionary of configuration file names to (supported) file types
  :return: a dictionary of config-type to a dictionary of key/value pairs for the properties found
"""
result = {}
from xml.etree import ElementTree as ET
import ConfigParser, StringIO
import re
for config_file, file_type in config_files.iteritems():
file_name, file_ext = os.path.splitext(config_file)
config_filepath = conf_dir + os.sep + config_file
if not os.path.isfile(config_filepath):
continue
if file_type == FILE_TYPE_XML:
configuration = ET.parse(config_filepath)
props = configuration.getroot().getchildren()
config_file_id = file_name if file_name else config_file
result[config_file_id] = {}
for prop in props:
result[config_file_id].update({prop[0].text: prop[1].text})
elif file_type == FILE_TYPE_PROPERTIES:
with open(config_filepath, 'r') as f:
config_string = '[root]\n' + f.read()
ini_fp = StringIO.StringIO(re.sub(r'\\\s*\n', '\\\n ', config_string))
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
props = config.items('root')
result[file_name] = {}
for key, value in props:
result[file_name].update({key : value})
elif file_type == FILE_TYPE_JAAS_CONF:
section_header = re.compile('^(\w+)\s+\{\s*$')
section_data = re.compile('^\s*([^ \s\=\}\{]+)\s*=?\s*"?([^ ";]+)"?;?\s*$')
section_footer = re.compile('^\}\s*;?\s*$')
section_name = "root"
result[file_name] = {}
with open(config_filepath, 'r') as f:
for line in f:
if line:
line = line.strip()
m = section_header.search(line)
if m:
section_name = m.group(1)
if section_name not in result[file_name]:
result[file_name][section_name] = {}
else:
m = section_footer.search(line)
if m:
section_name = "root"
else:
m = section_data.search(line)
if m:
result[file_name][section_name][m.group(1)] = m.group(2)
return result
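# Illustrative sketch only: reading a hypothetical Hive configuration directory with
# get_params_from_filesystem(). Files that do not exist are silently skipped, and the
# result is keyed by the file name without its extension ('hive-site', 'hive-log4j').
def _example_get_params_from_filesystem():
  conf_dir = '/etc/hive/conf'
  config_files = {
    'hive-site.xml': FILE_TYPE_XML,
    'hive-log4j.properties': FILE_TYPE_PROPERTIES
  }
  params = get_params_from_filesystem(conf_dir, config_files)
  # e.g. params.get('hive-site', {}).get('hive.metastore.uris')
  return params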
def cached_kinit_executor(kinit_path, exec_user, keytab_file, principal, hostname, temp_dir,
expiration_time=5):
"""
  Main cached kinit executor - uses a temporary file on the filesystem to cache executions. Each command
  has its own file, and only one entry (the last successful execution) is stored.
"""
key = str(hash("%s|%s" % (principal, keytab_file)))
filename = key + "_tmp.txt"
file_path = temp_dir + os.sep + "kinit_executor_cache"
output = None
# First execution scenario dir file existence check
if not os.path.exists(file_path):
os.makedirs(file_path)
file_path += os.sep + filename
# If the file does not exist create before read
if not os.path.isfile(file_path):
with open(file_path, 'w+') as new_file:
new_file.write("{}")
try:
with open(file_path, 'r') as cache_file:
output = json.load(cache_file)
except:
    # In the extraordinary case the temporary file gets corrupted, reset the cache to avoid an error loop
with open(file_path, 'w+') as cache_file:
cache_file.write("{}")
if (not output) or (key not in output) or ("last_successful_execution" not in output[key]):
new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
else:
last_run_time = output[key]["last_successful_execution"]
now = datetime.now()
if (now - datetime.strptime(last_run_time, "%Y-%m-%d %H:%M:%S.%f") > timedelta(minutes=expiration_time)):
new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
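# Illustrative sketch only: a hypothetical call to cached_kinit_executor(). kinit is
# re-executed for the principal/keytab pair only when the cached timestamp kept under
# temp_dir is older than expiration_time minutes (or when no cache entry exists yet).
def _example_cached_kinit_executor():
  cached_kinit_executor('/usr/bin/kinit', 'hive',
                        '/etc/security/keytabs/hive.service.keytab',
                        'hive/_HOST@EXAMPLE.COM',
                        'worker-node-1.example.com',
                        '/var/lib/ambari-agent/tmp',
                        expiration_time=5)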
def new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname):
"""
  Entry point of an actual execution - triggered when the cache timeout has expired or on a fresh execution
"""
now = datetime.now()
temp_kinit_cache_fd, temp_kinit_cache_filename = mkstemp(dir=temp_dir)
command = "%s -c %s -kt %s %s" % \
(kinit_path, temp_kinit_cache_filename, keytab_file,
principal.replace("_HOST", hostname))
os.close(temp_kinit_cache_fd)
try:
# Ensure the proper user owns this file
File(temp_kinit_cache_filename, owner=exec_user, mode=0600)
# Execute the kinit
Execute(command, user=exec_user)
with open(file_path, 'w+') as cache_file:
result = {key: {"last_successful_execution": str(now)}}
json.dump(result, cache_file)
finally:
File(temp_kinit_cache_filename, action='delete')
def get_value(values, property_path, default_value):
names = property_path.split('/')
current_dict = values
for name in names:
if name in current_dict:
current_dict = current_dict[name]
else:
return default_value
return current_dict
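# Illustrative sketch only: get_value() walks a nested dictionary using a '/'-separated
# property path and falls back to the supplied default when any segment is missing.
def _example_get_value():
  values = {'hive-site': {'javax.jdo.option.ConnectionUserName': 'hive'}}
  user = get_value(values, 'hive-site/javax.jdo.option.ConnectionUserName', None)  # -> 'hive'
  missing = get_value(values, 'hive-site/no.such.property', 'default')             # -> 'default'
  return user, missing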
# GUI that displays data produced and queued by worker threads (class-based)
import threading, queue, time
from tkinter.scrolledtext import ScrolledText # or PP4E.Gui.Tour.scrolledtext
class ThreadGui(ScrolledText):
threadsPerClick = 4
def __init__(self, parent=None):
ScrolledText.__init__(self, parent)
self.pack()
self.dataQueue = queue.Queue() # infinite size
        self.bind('<Button-1>', self.makethreads)   # on left mouse click
self.consumer() # queue loop in main thread
def producer(self, id):
for i in range(5):
time.sleep(0.1)
self.dataQueue.put('[producer id=%d, count=%d]' % (id, i))
def consumer(self):
try:
data = self.dataQueue.get(block=False)
except queue.Empty:
pass
else:
self.insert('end', 'consumer got => %s\n' % str(data))
self.see('end')
self.after(100, self.consumer) # 10 times per sec
def makethreads(self, event):
for i in range(self.threadsPerClick):
threading.Thread(target=self.producer, args=(i,)).start()
if __name__ == '__main__':
root = ThreadGui() # in main thread: make GUI, run timer loop
root.mainloop() # pop-up window, enter tk event loop
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cuda.libcudnn.CUDNN_ACTIVATION_SIGMOID
class Sigmoid(function_node.FunctionNode):
"""Logistic sigmoid function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
x = inputs[0]
half = x.dtype.type(0.5)
y = utils.force_array(numpy.tanh(x * half) * half + half)
self.retain_outputs((0,))
self._use_cudnn = False
return y,
def forward_gpu(self, inputs):
x = inputs[0]
if chainer.should_use_cudnn('==always') and x.flags.c_contiguous:
y = cudnn.activation_forward(x, _mode)
self.retain_inputs((0,))
self._use_cudnn = True
else:
y = cuda.elementwise(
'T x', 'T y', 'y = tanh(x * 0.5) * 0.5 + 0.5',
'sigmoid_fwd')(x)
self._use_cudnn = False
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
if self._use_cudnn:
x = self.get_retained_inputs()[0].data
else:
x = None
y = self.get_retained_outputs()[0]
gy, = grad_outputs
return SigmoidGrad((x,)).apply((y, gy))
class SigmoidGrad(function_node.FunctionNode):
"""Logistic sigmoid gradient function."""
def __init__(self, inputs):
super(SigmoidGrad, self).__init__()
self.x = inputs[0]
def check_type_forward(self, in_types):
type_check._argname(in_types, ('y', 'gy'))
type_check.expect(in_types[0].dtype.kind == 'f')
type_check.expect(in_types[1].dtype.kind == 'f')
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
one = y.dtype.type(1)
return utils.force_array(gy * y * (one - y)),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
if (chainer.should_use_cudnn('==always') and gy.flags.c_contiguous and
self.x is not None and self.x.flags.c_contiguous):
gx = cudnn.activation_backward(self.x, y, gy, _mode)
else:
gx = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = gy * y * (1 - y)',
'sigmoid_bwd')(y, gy)
return gx,
def backward(self, indexes, grad_outputs):
y, gy = self.get_retained_inputs()
ggx, = grad_outputs
return ggx * gy * (1 - 2 * y), ggx * y * (1 - y)
def sigmoid(x):
"""Element-wise sigmoid logistic function.
.. math:: f(x)=(1 + \\exp(-x))^{-1}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
It maps the input values into the range of :math:`[0, 1]`.
>>> x = np.arange(-2, 3, 2).astype(np.float32)
>>> x
array([-2., 0., 2.], dtype=float32)
>>> F.sigmoid(x).array
array([0.11920291, 0.5 , 0.8807971 ], dtype=float32)
"""
y, = Sigmoid().apply((x,))
return y
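# Illustrative sketch only (not part of the original module): a minimal forward/backward
# round trip through sigmoid() on the CPU. The input values here are arbitrary.
def _example_sigmoid():
    x = chainer.Variable(numpy.array([-2.0, 0.0, 2.0], dtype=numpy.float32))
    y = sigmoid(x)
    # Seed the output gradient with ones and backpropagate; x.grad then holds the
    # element-wise derivative y * (1 - y) computed by SigmoidGrad.
    y.grad = numpy.ones_like(y.array)
    y.backward()
    return y.array, x.grad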
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import distutils
from distutils.errors import DistutilsError
from distutils.msvccompiler import get_build_architecture
try:
from threading import local as tlocal
except ImportError:
from dummy_threading import local as tlocal
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
try:
set
except NameError:
from sets import Set as set
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib(object):
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
    If the command did not receive a setting, the environment variable
    NPY_NUM_BUILD_JOBS is checked; if that is unset, 1 is returned.
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
def quote_args(args):
    # don't use _nt_quote_args as it does not check whether
    # args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
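# Illustrative sketch only: quote_args() wraps arguments that contain spaces in double
# quotes, leaving already-quoted or space-free arguments untouched.
def _example_quote_args():
    args = quote_args(['gcc', '-I/opt/my include', '"already quoted"'])
    # -> ['gcc', '"-I/opt/my include"', '"already quoted"']
    return args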
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
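# Illustrative sketch only: njoin() accepts separate components or nested sequences and
# normalizes the result with minrelpath(). Expected values below assume a POSIX os.sep.
def _example_njoin():
    a = njoin('pkg', 'src', '..', 'include')   # -> 'pkg/include'
    b = njoin(['pkg', 'sub'], 'module.c')      # -> 'pkg/sub/module.c'
    return a, b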
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
fid.close()
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
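# Illustrative sketch only: minrelpath() collapses redundant '.' and '..' components
# (POSIX separators assumed in the expected value below).
def _example_minrelpath():
    return minrelpath('pkg/./src/../include')   # -> 'pkg/include'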
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = glob.glob(n)
p2 = glob.glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
    ver = msvc_runtime_major()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
f = open(source, 'r')
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
f.close()
return modules
def is_string(s):
return isinstance(s, basestring)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
    except Exception:
return False
return True
def is_glob_pattern(s):
    return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
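# Illustrative sketch only: splitting a mixed source list with filter_sources(). A small
# temporary .f90 file is created so that the module scan in _get_f90_modules() has a real
# file to open; the other names are hypothetical and are never touched on disk.
def _example_filter_sources():
    fo, f90_name = make_temp_file(suffix='.f90')
    fo.write('module demo_mod\nend module demo_mod\n')
    fo.close()
    c, cxx, f, fmodules = filter_sources(['array.c', 'wrapper.cxx', 'legacy.f', f90_name])
    # c -> ['array.c'], cxx -> ['wrapper.cxx'], f -> ['legacy.f'], fmodules -> [f90_name]
    return c, cxx, f, fmodules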
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149. For Python 3.2 this is implemented on
Linux, but not on OS X.
"""
confvars = distutils.sysconfig.get_config_vars()
# SO is deprecated in 3.3.1, use EXT_SUFFIX instead
so_ext = confvars.get('EXT_SUFFIX', None)
if so_ext is None:
so_ext = confvars.get('SO', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
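# Illustrative sketch only: get_shared_lib_extension() answers differently for plain
# shared libraries and for Python extension modules; typical values are noted below.
def _example_get_shared_lib_extension():
    lib_ext = get_shared_lib_extension(is_python_ext=False)  # '.so', '.dylib' or '.dll'
    py_ext = get_shared_lib_extension(is_python_ext=True)    # e.g. '.so' (possibly ABI-tagged) or '.pyd'
    return lib_ext, py_ext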
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
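# Illustrative sketch only: dot_join() drops empty or None components before joining
# with '.', which is convenient when the parent package name may be absent.
def _example_dot_join():
    full = dot_join('parent_pkg', '', 'sub_pkg')   # -> 'parent_pkg.sub_pkg'
    bare = dot_join(None, 'sub_pkg')               # -> 'sub_pkg'
    return full, bare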
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
        # local_path -- directory of a file (usually setup.py) that
        # defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = npy_load_module('_'.join(n.split('.')),
setup_py,
('.py', 'U', 1))
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
def fix_args_py2(args):
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
return args
def fix_args_py3(args):
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
return args
if sys.version_info[0] < 3:
args = fix_args_py2(args)
else:
args = fix_args_py3(args)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
            if given, the subpackage path such that the subpackage is in
            subpackage_path / subpackage_name. If None, the subpackage is
            assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
                * 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
            <package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
                * 2-sequence (<datadir prefix>, <path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. *.txt -> parent/a.txt, parent/b.txt
#. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
#. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
            <package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
               cat.dat
can.dat
        where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
'/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
        of the define_macros list. This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
        By default, headers will be installed under the
        <python-include>/<self.name.replace('.','/')>/ directory. If an item of files
        is a tuple, then its first argument specifies the actual installation
        location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
            * 2-sequence (<includedir suffix>, <path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
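# Hedged sketch of the install-and-reuse pattern described above (all names
# are illustrative): the library is installed together with an .ini template
# so that third-party packages can later link against it via get_info().
#
#     config.add_installed_library('mylib', sources=['mylib.c'],
#                                  install_dir='lib')
#     config.add_npy_pkg_config('mylib.ini.in', 'lib', {'mylib': 'mylib'})
#     # in another package's setup.py:
#     #     info = get_info('mylib')
#     #     config.add_extension('uses_mylib', sources=['u.c'], extra_info=info)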
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relative to the current package path.
install_dir : str
Where to install the npy-pkg config file, relative to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See Also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': 'bar'})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the /bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (i.e. a simple Fortran 77
program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
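# Illustrative sketch (hypothetical file names) of the intended use inside a
# source generator: only emit the Fortran wrapper when an F77 compiler is
# actually available.
#
#     def maybe_fortran_sources(ext, build_dir):
#         if config.have_f77c():
#             return ['fortran_wrapper.f']
#         return None
#     config.add_extension('wrapper', sources=[maybe_fortran_sources])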
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (i.e. a simple Fortran 90
program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['svnversion'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml':  # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
revision = None
m = None
cwd = os.getcwd()
try:
os.chdir(path or '.')
p = subprocess.Popen(['hg identify --num'], shell=True,
stdout=subprocess.PIPE, stderr=None,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
os.chdir(cwd)
if m:
revision = int(m.group('revision'))
return revision
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
f = open(branch_fn)
revision0 = f.read().strip()
f.close()
branch_map = {}
for line in file(branch_cache_fn, 'r'):
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
revision = branch_map.get(branch0)
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, _version.py, version.py, and
__svn_version__.py for string variables version, __version__, and
_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
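# Sketch of the simplest case handled above (hypothetical file): a sibling
# version.py next to setup.py containing
#
#     version = '1.2.3'
#
# makes config.get_version() return '1.2.3'.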
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
the __svn_version__.py file in the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
the __hg_version__.py file in the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in a Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
f = open(target, 'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import numpy
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension's extra_info argument is strict about which keys it accepts
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
__NUMPY_SETUP__ = False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
), stacklevel=2)
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
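# Small illustration of dict_append's merge rules (inputs are made up):
# list values are extended, string values are overwritten, new keys are added.
#
#     d = {'libraries': ['m'], 'language': 'c'}
#     dict_append(d, libraries=['npymath'], define_macros=[('A', '1')],
#                 language='c++')
#     # d == {'libraries': ['m', 'npymath'],
#     #       'define_macros': [('A', '1')],
#     #       'language': 'c++'}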
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
''')
f.close()
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
"""Flash Object to be inherited"""
class SerialFlash(object):
"""Interface of a generic SPI flash device"""
FEAT_NONE = 0x000 # No special feature
FEAT_LOCK = 0x001 # Basic, revertable locking
FEAT_INVLOCK = 0x002 # Inverted (bottom/top) locking
FEAT_SECTLOCK = 0x004 # Arbitrary sector locking
FEAT_OTPLOCK = 0x008 # OTP locking available
FEAT_UNIQUEID = 0x010 # Unique ID
FEAT_SECTERASE = 0x100 # Can erase whole sectors
FEAT_HSECTERASE = 0x200 # Can erase half sectors
FEAT_SUBSECTERASE = 0x400 # Can erase sub sectors
def read(self, address, length):
"""Read a sequence of bytes from the specified address."""
raise NotImplementedError()
def write(self, address, data):
"""Write a sequence of bytes, starting at the specified address."""
raise NotImplementedError()
def erase(self, address, length):
"""Erase a block of bytes. Address and length depends upon device-
specific constraints."""
raise NotImplementedError()
def can_erase(self, address, length):
"""Tells whether a defined area can be erased on the Spansion flash
device. It does not take into account any locking scheme."""
raise NotImplementedError()
def is_busy(self):
"""Reports whether the flash may receive commands or is actually
being performing internal work"""
raise NotImplementedError()
def get_capacity(self):
"""Get the flash device capacity in bytes"""
raise NotImplementedError()
def get_capabilities(self):
"""Flash device capabilities."""
return SerialFlash.FEAT_NONE
def get_locks(self):
"""Report the currently write-protected areas of the device."""
raise NotImplementedError()
def set_lock(self, address, length, otp=False):
"""Create a write-protected area. Device should have been unlocked
first."""
raise NotImplementedError()
def unlock(self):
"""Make the whole device read/write"""
pass
def get_unique_id(self):
"""Return the unique ID of the flash, if it exists"""
raise NotImplementedError()
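# A minimal sketch of a concrete implementation of this interface. The
# spi_exchange callable and the 0x03 READ opcode are assumptions for the
# example, not part of this module.
#
# class SketchFlash(SerialFlash):
#     CMD_READ = 0x03
#     def __init__(self, spi_exchange, capacity):
#         self._exchange = spi_exchange  # callable(command_bytes, read_length)
#         self._capacity = capacity
#     def get_capacity(self):
#         return self._capacity
#     def read(self, address, length):
#         cmd = bytearray([self.CMD_READ,
#                          (address >> 16) & 0xff,
#                          (address >> 8) & 0xff,
#                          address & 0xff])
#         return self._exchange(cmd, length)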
import unittest
try:
from unittest.mock import patch # Python 3.4 and later
except ImportError:
from mock import patch
from ncclient import manager
import ncclient.transport
from ncclient.operations.third_party.alu.rpc import *
class TestRPC(unittest.TestCase):
def setUp(self):
self.device_handler = manager.make_device_handler({'name': 'alu'})
@patch('ncclient.operations.third_party.alu.rpc.RPC._request')
def test_showCLI(self, mock_request):
mock_request.return_value = 'alu'
expected = 'alu'
session = ncclient.transport.SSHSession(self.device_handler)
obj = ShowCLI(session, self.device_handler)
command = 'show system users'
actual = obj.request(command=command)
self.assertEqual(expected, actual)
@patch('ncclient.operations.third_party.alu.rpc.RPC._request')
def test_getConfiguration(self, mock_request):
mock_request.return_value = 'alu'
expected = 'alu'
session = ncclient.transport.SSHSession(self.device_handler)
obj = GetConfiguration(session, self.device_handler)
content = 'xml'
actual = obj.request(content=content)
self.assertEqual(expected, actual)
filter = 'device-name'
actual = obj.request(content=content, filter=filter)
self.assertEqual(expected, actual)
content = 'cli'
actual = obj.request(content=content, filter=filter)
self.assertEqual(expected, actual)
detail = True
actual = obj.request(content=content, filter=filter, detail=detail)
self.assertEqual(expected, actual)
content = ''
actual = obj.request(content=content, filter=filter, detail=detail)
self.assertEqual(expected, actual)
@patch('ncclient.operations.third_party.alu.rpc.RPC._request')
def test_loadConfiguration(self, mock_request):
mock_request.return_value = 'alu'
expected = 'alu'
session = ncclient.transport.SSHSession(self.device_handler)
obj = LoadConfiguration(session, self.device_handler)
default_operation = ''
format = 'xml'
actual = obj.request(format=format, default_operation=default_operation)
self.assertEqual(expected, actual)
default_operation = 'get'
actual=obj.request(format=format, default_operation=default_operation)
self.assertEqual(expected, actual)
config = new_ele('device-name')
actual=obj.request(format=format, default_operation=default_operation, config=config)
self.assertEqual(expected, actual)
config = 'device-name'
format = 'cli'
actual=obj.request(format=format, default_operation=default_operation, config=config)
self.assertEqual(expected, actual)
default_operation = ''
actual=obj.request(format=format, default_operation=default_operation, config=config)
self.assertEqual(expected, actual)
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from snapcraft.internal import log
from snapcraft import tests
from snapcraft.tests import fixture_setup
class LogTestCase(tests.TestCase):
def setUp(self):
super().setUp()
self.info_color = log._ColoredFormatter.LEVEL_COLORS['INFO']
self.warning_color = log._ColoredFormatter.LEVEL_COLORS['WARNING']
self.error_color = log._ColoredFormatter.LEVEL_COLORS['ERROR']
self.critical_color = log._ColoredFormatter.LEVEL_COLORS['CRITICAL']
def test_configure_must_send_messages_to_stdout(self):
logger_name = self.id()
log.configure(logger_name)
logger = logging.getLogger(logger_name)
# Overwrite the level to log everything.
logger.setLevel(logging.DEBUG)
logger.debug('Test debug')
logger.info('Test info')
logger.warning('Test warning')
expected_out = ('Test debug\n'
'{}Test info\033[0m\n'
'{}Test warning\033[0m\n').format(
self.info_color, self.warning_color)
self.assertEqual(expected_out,
self.fake_terminal.getvalue())
self.assertEqual('', self.fake_terminal.getvalue(stderr=True))
def test_configure_must_send_errors_to_stderr(self):
logger_name = self.id()
log.configure(logger_name)
logger = logging.getLogger(logger_name)
# Overwrite the level to log everything.
logger.setLevel(logging.DEBUG)
logger.error('Test error')
logger.critical('Test critical')
expected_err = ('{}Test error\033[0m\n'
'{}Test critical\033[0m\n').format(
self.error_color, self.critical_color)
self.assertEqual(expected_err,
self.fake_terminal.getvalue(stderr=True))
self.assertEqual('', self.fake_terminal.getvalue())
def test_configure_must_log_info_and_higher(self):
logger_name = self.id()
log.configure(logger_name)
logger = logging.getLogger(logger_name)
logger.debug('Test debug')
logger.info('Test info')
logger.warning('Test warning')
logger.error('Test error')
logger.critical('Test critical')
expected_out = ('{}Test info\033[0m\n'
'{}Test warning\033[0m\n').format(
self.info_color, self.warning_color)
expected_err = ('{}Test error\033[0m\n'
'{}Test critical\033[0m\n').format(
self.error_color, self.critical_color)
self.assertEqual(expected_out, self.fake_terminal.getvalue())
self.assertEqual(expected_err,
self.fake_terminal.getvalue(stderr=True))
def test_configure_must_support_debug(self):
logger_name = self.id()
log.configure(logger_name, log_level=logging.DEBUG)
logger = logging.getLogger(logger_name)
logger.debug('Test debug')
logger.info('Test info')
logger.warning('Test warning')
logger.error('Test error')
logger.critical('Test critical')
expected_out = ('Test debug\n'
'{}Test info\033[0m\n'
'{}Test warning\033[0m\n').format(
self.info_color, self.warning_color)
expected_err = ('{}Test error\033[0m\n'
'{}Test critical\033[0m\n').format(
self.error_color, self.critical_color)
self.assertEqual(expected_out, self.fake_terminal.getvalue())
self.assertEqual(expected_err,
self.fake_terminal.getvalue(stderr=True))
def test_configure_must_support_no_tty(self):
self.fake_terminal = fixture_setup.FakeTerminal(isatty=False)
self.useFixture(self.fake_terminal)
logger_name = self.id()
log.configure(logger_name, log_level=logging.DEBUG)
logger = logging.getLogger(logger_name)
logger.debug('Test debug')
logger.info('Test info')
logger.warning('Test warning')
logger.error('Test error')
logger.critical('Test critical')
expected_out = ('Test debug\n'
'Test info\n'
'Test warning\n')
expected_err = ('Test error\n'
'Test critical\n')
self.assertEqual(expected_out, self.fake_terminal.getvalue())
self.assertEqual(expected_err,
self.fake_terminal.getvalue(stderr=True))
import os
import urlparse
from fnmatch import fnmatch
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
import html5lib
import vcs
from item import Stub, ManualTest, WebdriverSpecTest, RefTest, TestharnessTest
from utils import rel_path_to_url, is_blacklisted, ContextManagerStringIO, cached_property
wd_pattern = "*.py"
class SourceFile(object):
parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"),
"xhtml":ElementTree.parse,
"svg":ElementTree.parse}
def __init__(self, tests_root, rel_path, url_base, use_committed=False):
"""Object representing a file in a source tree.
:param tests_root: Path to the root of the source tree
:param rel_path: File path relative to tests_root
:param url_base: Base URL used when converting file paths to urls
:param use_committed: Work with the last committed version of the file
rather than the on-disk version.
"""
self.tests_root = tests_root
self.rel_path = rel_path
self.url_base = url_base
self.use_committed = use_committed
self.url = rel_path_to_url(rel_path, url_base)
self.path = os.path.join(tests_root, rel_path)
self.dir_path, self.filename = os.path.split(self.path)
self.name, self.ext = os.path.splitext(self.filename)
self.type_flag = None
if "-" in self.name:
self.type_flag = self.name.rsplit("-", 1)[1]
self.meta_flags = self.name.split(".")[1:]
def __getstate__(self):
# Remove computed properties if we pickle this class
rv = self.__dict__.copy()
if "__cached_properties__" in rv:
cached_properties = rv["__cached_properties__"]
for key in rv.keys():
if key in cached_properties:
del rv[key]
del rv["__cached_properties__"]
return rv
def name_prefix(self, prefix):
"""Check if the filename starts with a given prefix
:param prefix: The prefix to check"""
return self.name.startswith(prefix)
def open(self):
"""Return a File object opened for reading the file contents,
or the contents of the file when last committed, if
use_committed is true."""
if self.use_committed:
git = vcs.get_git_func(os.path.dirname(__file__))
blob = git("show", "HEAD:%s" % self.rel_path)
file_obj = ContextManagerStringIO(blob)
else:
file_obj = open(self.path)
return file_obj
@property
def name_is_non_test(self):
"""Check if the file name matches the conditions for the file to
be a non-test file"""
return (os.path.isdir(self.rel_path) or
self.name_prefix("MANIFEST") or
self.filename.startswith(".") or
is_blacklisted(self.url))
@property
def name_is_stub(self):
"""Check if the file name matches the conditions for the file to
be a stub file"""
return self.name_prefix("stub-")
@property
def name_is_manual(self):
"""Check if the file name matches the conditions for the file to
be a manual test file"""
return self.type_flag == "manual"
@property
def name_is_worker(self):
"""Check if the file name matches the conditions for the file to
be a worker js test file"""
return "worker" in self.meta_flags and self.ext == ".js"
@property
def name_is_webdriver(self):
"""Check if the file name matches the conditions for the file to
be a webdriver spec test file"""
# wdspec tests are in subdirectories of /webdriver excluding __init__.py
# files.
rel_dir_tree = self.rel_path.split(os.path.sep)
return (rel_dir_tree[0] == "webdriver" and
len(rel_dir_tree) > 2 and
self.filename != "__init__.py" and
fnmatch(self.filename, wd_pattern))
@property
def name_is_reference(self):
"""Check if the file name matches the conditions for the file to
be a reference file (not a reftest)"""
return self.type_flag in ("ref", "notref")
@property
def markup_type(self):
"""Return the type of markup contained in a file, based on its extension,
or None if it doesn't contain markup"""
ext = self.ext
if not ext:
return None
if ext[0] == ".":
ext = ext[1:]
if ext in ["html", "htm"]:
return "html"
if ext in ["xhtml", "xht"]:
return "xhtml"
if ext == "svg":
return "svg"
return None
@cached_property
def root(self):
"""Return an ElementTree Element for the root node of the file if it contains
markup, or None if it does not"""
if not self.markup_type:
return None
parser = self.parsers[self.markup_type]
with self.open() as f:
try:
tree = parser(f)
except Exception:
return None
if hasattr(tree, "getroot"):
root = tree.getroot()
else:
root = tree
return root
@cached_property
def timeout_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify timeouts"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']")
@cached_property
def timeout(self):
"""The timeout of a test or reference file. "long" if the file has an extended timeout
or None otherwise"""
if not self.root:
return
if self.timeout_nodes:
timeout_str = self.timeout_nodes[0].attrib.get("content", None)
if timeout_str and timeout_str.lower() == "long":
return timeout_str
@cached_property
def testharness_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
testharness.js script"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']")
@cached_property
def content_is_testharness(self):
"""Boolean indicating whether the file content represents a
testharness.js test"""
if not self.root:
return None
return bool(self.testharness_nodes)
@cached_property
def variant_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
test variant"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']")
@cached_property
def test_variants(self):
rv = []
for element in self.variant_nodes:
if "content" in element.attrib:
variant = element.attrib["content"]
assert variant == "" or variant[0] in ["#", "?"]
rv.append(variant)
if not rv:
rv = [""]
return rv
@cached_property
def reftest_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
to a reftest """
if not self.root:
return []
match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']")
mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']")
return match_links + mismatch_links
@cached_property
def references(self):
"""List of (ref_url, relation) tuples for any reftest references specified in
the file"""
rv = []
rel_map = {"match": "==", "mismatch": "!="}
for item in self.reftest_nodes:
if "href" in item.attrib:
ref_url = urlparse.urljoin(self.url, item.attrib["href"])
ref_type = rel_map[item.attrib["rel"]]
rv.append((ref_url, ref_type))
return rv
@cached_property
def content_is_ref_node(self):
"""Boolean indicating whether the file is a non-leaf node in a reftest
graph (i.e. if it contains any reftest links)"""
return bool(self.references)
def manifest_items(self):
"""List of manifest items corresponding to the file. There is typically one
per test, but in the case of reftests a node may have corresponding manifest
items without being a test itself."""
if self.name_is_non_test:
rv = []
elif self.name_is_stub:
rv = [Stub(self, self.url)]
elif self.name_is_manual:
rv = [ManualTest(self, self.url)]
elif self.name_is_worker:
rv = [TestharnessTest(self, self.url[:-3])]
elif self.name_is_webdriver:
rv = [WebdriverSpecTest(self)]
elif self.content_is_testharness:
rv = []
for variant in self.test_variants:
url = self.url + variant
rv.append(TestharnessTest(self, url, timeout=self.timeout))
elif self.content_is_ref_node:
rv = [RefTest(self, self.url, self.references, timeout=self.timeout)]
else:
# If nothing else it's a helper file, which we don't have a specific type for
rv = []
return rv
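# Illustrative use (hypothetical paths): a manifest builder constructs one
# SourceFile per file under the test root and collects its manifest items.
#
#     source = SourceFile("/path/to/tests", "dom/example.html", "/")
#     for item in source.manifest_items():
#         ...  # add the item to the manifest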
# -----------------------------------------------------------------------
# Standalone and testing code
import sys
try:
import pywraps
pywraps_there = True
print "Using pywraps"
except:
pywraps_there = False
print "Not using pywraps"
try:
import _idaapi
except:
print "Please try me from inside IDA"
sys.exit(0)
import struct
if pywraps_there:
_idaapi.register_custom_data_type = pywraps.register_custom_data_type
_idaapi.unregister_custom_data_type = pywraps.unregister_custom_data_type
_idaapi.register_custom_data_format = pywraps.register_custom_data_format
_idaapi.unregister_custom_data_format = pywraps.unregister_custom_data_format
_idaapi.get_custom_data_format = pywraps.get_custom_data_format
_idaapi.get_custom_data_type = pywraps.get_custom_data_type
# -----------------------------------------------------------------------
#
DTP_NODUP = 0x0001
class data_type_t(object):
"""
Custom data type definition. All data types should inherit from this class.
"""
def __init__(self, name, value_size = 0, menu_name = None, hotkey = None, asm_keyword = None, props = 0):
"""Please refer to bytes.hpp / data_type_t in the SDK"""
self.name = name
self.props = props
self.menu_name = menu_name
self.hotkey = hotkey
self.asm_keyword = asm_keyword
self.value_size = value_size
self.id = -1 # Will be initialized after registration
"""Contains the data type id after the data type is registered"""
def register(self):
"""Registers the data type and returns the type id or < 0 on failure"""
return _idaapi.register_custom_data_type(self)
def unregister(self):
"""Unregisters the data type and returns True on success"""
# Not registered?
if self.id < 0:
return True
# Try to unregister
r = _idaapi.unregister_custom_data_type(self.id)
# Clear the ID
if r:
self.id = -1
return r
#
# def may_create_at(self, ea, nbytes):
# """
# (optional) If this callback is not defined, it means the data type may always be created at the given ea.
# @param ea: address of the future item
# @param nbytes: size of the future item
# @return: Boolean
# """
#
# return False
#
# def calc_item_size(self, ea, maxsize):
# """
# (optional) If this callback is defined, the data type has a variable size.
# This function is used to determine size of the (possible) item at 'ea'
# @param ea: address of the item
# @param maxsize: maximal size of the item
# @return: integer
# Returns: 0-no such item can be created/displayed
# this callback is required only for varsize datatypes
# """
# return 0
#
# -----------------------------------------------------------------------
# Uncomment the corresponding callbacks in the inherited class
class data_format_t(object):
"""Information about a data format"""
def __init__(self, name, value_size = 0, menu_name = None, props = 0, hotkey = None, text_width = 0):
"""Custom data format definition.
@param name: Format name, must be unique
@param menu_name: Visible format name to use in menus
@param props: properties (currently 0)
@param hotkey: Hotkey for the corresponding menu item
@param value_size: size of the value in bytes. 0 means any size is ok
@param text_width: Usual width of the text representation
"""
self.name = name
self.menu_name = menu_name
self.props = props
self.hotkey = hotkey
self.value_size = value_size
self.text_width = text_width
self.id = -1 # Will be initialized after registration
"""contains the format id after the format gets registered"""
def register(self, dtid):
"""Registers the data format with the given data type id and returns the type id or < 0 on failure"""
return _idaapi.register_custom_data_format(dtid, self)
def unregister(self, dtid):
"""Unregisters the data format with the given data type id"""
# Not registered?
if self.id < 0:
return True
# Unregister
r = _idaapi.unregister_custom_data_format(dtid, self.id)
# Clear the ID
if r:
self.id = -1
return r
#
# def printf(self, value, current_ea, operand_num, dtid):
# """
# Convert a value buffer to colored string.
#
# @param value: The value to be printed
# @param current_ea: The ea of the value
# @param operand_num: The affected operand
# @param dtid: custom data type id (0-standard built-in data type)
# @return: a colored string representing the passed 'value' or None on failure
# """
# return None
#
# def scan(self, input, current_ea, operand_num):
# """
# Convert from uncolored string 'input' to byte value
#
# @param input: input string
# @param current_ea: current address (BADADDR if unknown)
# @param operand_num: current operand number (-1 if unknown)
#
# @return: tuple (Boolean, string)
# - (False, ErrorMessage) if conversion fails
# - (True, Value buffer) if conversion succeeds
# """
# return (False, "Not implemented")
#
# def analyze(self, current_ea, operand_num):
# """
# (optional) Analyze custom data format occurrence.
# It can be used to create xrefs from the current item.
#
# @param current_ea: current address (BADADDR if unknown)
# @param operand_num: current operand number
# @return: None
# """
#
# pass
#
# -----------------------------------------------------------------------
def __walk_types_and_formats(formats, type_action, format_action, installing):
broken = False
for f in formats:
if len(f) == 1:
if not format_action(f[0], 0):
broken = True
break
else:
dt = f[0]
dfs = f[1:]
# install data type before installing formats
if installing and not type_action(dt):
broken = True
break
# process formats using the correct dt.id
for df in dfs:
if not format_action(df, dt.id):
broken = True
break
# uninstall data type after uninstalling formats
if not installing and not type_action(dt):
broken = True
break
return not broken
# -----------------------------------------------------------------------
def register_data_types_and_formats(formats):
"""
Registers multiple data types and formats at once.
To register one type/format at a time use register_custom_data_type/register_custom_data_format
It employs a special table of types and formats described below:
The 'formats' argument is a list of tuples. If a tuple has one element then it is the format to be registered with dtid=0.
If the tuple has more than one element, then tuple[0] is the data type and tuple[1:] are the data formats. For example:
many_formats = [
(pascal_data_type(), pascal_data_format()),
(simplevm_data_type(), simplevm_data_format()),
(makedword_data_format(),),
(simplevm_data_format(),)
]
The first two tuples describe data types and their associated formats.
The last two tuples describe two data formats to be used with built-in data types.
"""
def __reg_format(df, dtid):
df.register(dtid)
if dtid == 0:
print "Registered format '%s' with built-in types, ID=%d" % (df.name, df.id)
else:
print " Registered format '%s', ID=%d (dtid=%d)" % (df.name, df.id, dtid)
return df.id != -1
def __reg_type(dt):
dt.register()
print "Registered type '%s', ID=%d" % (dt.name, dt.id)
return dt.id != -1
ok = __walk_types_and_formats(formats, __reg_type, __reg_format, True)
return 1 if ok else -1
# -----------------------------------------------------------------------
def unregister_data_types_and_formats(formats):
"""As opposed to register_data_types_and_formats(), this function
unregisters multiple data types and formats at once.
"""
def __unreg_format(df, dtid):
print "%snregistering format '%s'" % ("U" if dtid == 0 else " u", df.name)
df.unregister(dtid)
return True
def __unreg_type(dt):
print "Unregistering type '%s', ID=%d" % (dt.name, dt.id)
dt.unregister()
return True
ok = __walk_types_and_formats(formats, __unreg_type, __unreg_format, False)
return 1 if ok else -1
#
# -----------------------------------------------------------------------
#!/usr/bin/env python
import sys
import unittest
from ZSI import TC, ParsedSoap, ParseException, EvaluateException, FaultFromZSIException, FaultFromException, FaultFromActor, FaultFromNotUnderstood, SoapWriter
class t2TestCase(unittest.TestCase):
"Test case wrapper for old ZSI t2 test case"
def checkt2(self):
try:
ps = ParsedSoap(IN)
except ParseException, e:
print >>OUT, FaultFromZSIException(e).AsSOAP()
self.fail()
except Exception, e:
# Faulted while processing; assume it's in the
# header.
print >>OUT, FaultFromException(e, 1).AsSOAP()
self.fail()
# We are not prepared to handle any actors or mustUnderstand elements.
# Arbitrary fault back with the first one found.
a = ps.WhatActorsArePresent()
if len(a):
print >>OUT, FaultFromActor(a[0]).AsSOAP()
self.fail()
mu = ps.WhatMustIUnderstand()
if len(mu):
uri, localname = mu[0]
print >>OUT, FaultFromNotUnderstood(uri, localname).AsSOAP()
self.fail()
try:
player = ps.Parse(Player)
except EvaluateException, e:
print >>OUT, FaultFromZSIException(e).AsSOAP()
self.fail()
try:
import operator
total = reduce(operator.add, player.Scores, 0)
result = Average(foo(total, len(player.Scores)))
sw = SoapWriter().serialize(result)
str(sw)
#print >>OUT, str(sw)
except Exception, e:
print >>OUT, FaultFromException(e, 0, sys.exc_info()[2]).AsSOAP()
self.fail()
def makeTestSuite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(t2TestCase, "check"))
return suite
class Player:
'''Input class.'''
def __init__(self, name=None):
pass
Player.typecode = TC.Struct(Player, [ TC.String('Name', optional=1),
TC.Array('xsd:integer', TC.Integer(),
'Scores'), ], 'GetAverage')
class Average:
'''Output class.'''
def __init__(self, average):
self.average = average
Average.typecode = TC.Struct(Average, [ TC.Integer('average'),
], 'GetAverageResponse', inline=1)
def bar(total, len):
return total / len
def foo(total, len):
return bar(total, len)
OUT = sys.stdout
IN='''841012004John Doe'''
def main():
unittest.main(defaultTest="makeTestSuite")
if __name__ == "__main__" : main()
#!/usr/bin/python
import os.path
import plistlib
import sys
import reports
# Print the usage text if needed
if len(sys.argv) != 2:
print "Usage: python wikifiy-summary.py Summary.plist"
sys.exit()
# Read in the file they named
infile = open(sys.argv[1], "r")
data = infile.read()
infile.close()
# We want an array of reports, but we also want direct access to the plist
plist = plistlib.readPlistFromString(data)
reportlist = reports.summaryPlistToReports(data)
# Print out the header
print """== Static Analysis of Adium ==
This page lists the reports generated by [http://clang.llvm.org/StaticAnalysis.html Clang Static Analyzer] as run on r%(revision)s. This page can be used by developers to investigate each report and make a ruling on its validity.
=== Key ===
|| ||No ruling||
||[ticket:6 Y]||Confirmed report, ticket open||
||[ticket:1337 Y]||Confirmed report, ticket closed||
||~~[changeset:4 F]~~||Confirmed report, fixed without a ticket||
||N||False positive||
=== Reports ===
|| ||Data||Comment||""" % { "revision" : plist["Revision"] }
# Now print out each report
for r in reportlist:
print "|| ||%(bugtype)s in [http://rgov.org/adium/r%(revision)s/%(reportfile)s %(sourcefile)s:%(linenum)i]|| ||" % {
"bugtype" : r.type,
"revision" : plist["Revision"],
"reportfile" : r.reportfile,
"sourcefile" : os.path.basename(r.sourcefile),
"linenum" : r.endpathline,
}
from itertools import izip
import theano
import theano.tensor as T
import numpy as np
import utils as U
from parameters import Parameters
def clip(magnitude):
def clipper(deltas):
grads_norms = [T.sqrt(T.sum(T.sqr(g))) for g in deltas]
return [
T.switch(
T.gt(n, magnitude),
magnitude * (g / n), g
) for n, g in zip(grads_norms, deltas)
]
return clipper
def myclip(magnitude):
def clipper(deltas):
grads_norms = [T.sqrt(T.sum(T.sqr(g))) for g in deltas]
max_norm = T.max(grads_norms)
return [
T.switch(
T.gt(max_norm, magnitude),
magnitude * (g / max_norm), g
) for g in deltas
]
return clipper
def track_parameters(update_fun):
def decorated_fun(parameters, gradients, **kwargs):
if "P" not in kwargs:
kwargs["P"] = Parameters()
if "delta_preprocess" in kwargs:
delta_preprocess = kwargs["delta_preprocess"]
del kwargs["delta_preprocess"]
else:
delta_preprocess = lambda x: x
deltas, updates = update_fun(parameters, gradients, **kwargs)
deltas = delta_preprocess(deltas)
assert(len(deltas) == len(parameters))
return zip(parameters, (p - d for p, d in izip(parameters, deltas))) + updates
return decorated_fun
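# Hedged usage sketch: any decorated update rule below can be combined with
# the clipping helpers above through the delta_preprocess hook (inputs and
# cost are placeholders here).
#
#     updates = adadelta(parameters, gradients, delta_preprocess=clip(5.0))
#     train = theano.function(inputs, cost, updates=updates)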
def create_param(P, name, w):
P[name] = w
return P[name]
def get_shapes(parameters):
return [p.get_value().shape for p in parameters]
@track_parameters
def adadelta(parameters, gradients, rho=np.float32(0.95), learning_rate=np.float32(1e-4), P=None):
eps = learning_rate
shapes = get_shapes(parameters)
acc_gradients_sq = [create_param(
P, "grad_sq_" + p.name, np.zeros(s)) for p, s in izip(parameters, shapes)]
acc_deltas_sq = [create_param(P, "deltas_sq_" + p.name, np.zeros(s))
for p, s in izip(parameters, shapes)]
gradients_sq = [T.sqr(g) for g in gradients]
gradients_sq_new = [rho * acc_g_sq + (np.float32(1.) - rho) *
g_sq for acc_g_sq, g_sq in izip(acc_gradients_sq, gradients_sq)]
learning_rate_sq = [(d_sq + eps) / (g_sq + eps)
for d_sq, g_sq in izip(acc_deltas_sq, gradients_sq_new)]
deltas_sq = [lr_sq * g_sq for lr_sq,
g_sq in izip(learning_rate_sq, gradients_sq)]
deltas_sq_new = [rho * acc_d_sq + (np.float32(1.) - rho) *
d_sq for acc_d_sq, d_sq in izip(acc_deltas_sq, deltas_sq)]
deltas = [T.sqrt(lr_sq) * g for lr_sq,
g in izip(learning_rate_sq, gradients)]
gradient_sq_updates = zip(acc_gradients_sq, gradients_sq_new)
deltas_sq_updates = zip(acc_deltas_sq, deltas_sq_new)
return deltas, gradient_sq_updates + deltas_sq_updates
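# For reference, the updates computed above follow the usual Adadelta rule,
# with eps standing in for the small smoothing constant:
#   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
#   delta_t   = sqrt((E[dx^2]_{t-1} + eps) / (E[g^2]_t + eps)) * g_t
#   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * delta_t^2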
@track_parameters
def adagrad(parameters, gradients, learning_rate=1e-4, P=None):
shapes = get_shapes(parameters)
grad_sq = [create_param(P, "acc_sq_" + p.name, np.zeros(s))
for p, s in izip(parameters, shapes)]
grad_sq_new = [g_sq + g**2 for g, g_sq in izip(gradients, grad_sq)]
deltas = [learning_rate * g / T.sqrt(g_sq + 1e-6)
for g, g_sq in izip(gradients, grad_sq_new)]
grad_sq_update = zip(grad_sq, grad_sq_new)
return deltas, grad_sq_update
@track_parameters
def momentum(parameters, gradients, mu=0.9, learning_rate=1e-3, P=None):
eps = learning_rate
P.t = 1
m = (1 - 3.0 / (P.t + 5) < mu)
mu = m * (1 - 3.0 / (P.t + 5)) + (1 - m) * mu
shapes = get_shapes(parameters)
deltas = [create_param(P, "deltas_" + p.name, np.zeros(s))
for p, s in izip(parameters, shapes)]
delta_nexts = [mu * delta + eps * grad for delta,
grad in zip(deltas, gradients)]
delta_updates = [(delta, delta_next)
for delta, delta_next in zip(deltas, delta_nexts)]
return delta_nexts, delta_updates + [(P.t, P.t + 1)]
@track_parameters
def rmsprop(parameters, gradients, discount=0.95, momentum=0.9, learning_rate=1e-4, epsilon=1e-4, P=None):
shapes = get_shapes(parameters)
sq_acc = [create_param(P, "sq_acc_" + p.name, np.zeros(s))
for p, s in izip(parameters, shapes)]
acc = [create_param(P, "acc_" + p.name, np.zeros(s))
for p, s in izip(parameters, shapes)]
delta_acc = [create_param(P, "delta_acc_" + p.name, np.zeros(s))
for p, s in izip(parameters, shapes)]
sq_avg = [discount * sq_a + (1 - discount) * (g**2)
for sq_a, g in izip(sq_acc, gradients)]
avg = [discount * a + (1 - discount) * g for a,
g in izip(acc, gradients)]
scaled_grads = [g / T.sqrt(sq_a - a**2 + epsilon)
for g, a, sq_a in izip(gradients, acc, sq_acc)]
deltas = [momentum * d_a + learning_rate *
s_g for d_a, s_g in izip(delta_acc, scaled_grads)]
sq_acc_updates = [(sq_a, sq_aa) for sq_a, sq_aa in izip(sq_acc, sq_avg)]
acc_updates = [(a, aa) for a, aa in izip(acc, avg)]
delta_updates = [(d_a, d) for d_a, d in izip(delta_acc, deltas)]
parameters_updates = [(p, p - d) for p, d in izip(parameters, deltas)]
return deltas, acc_updates + sq_acc_updates + delta_updates
data = (
'pyuk', # 0x00
'pyut', # 0x01
'pyup', # 0x02
'pyuh', # 0x03
'peu', # 0x04
'peug', # 0x05
'peugg', # 0x06
'peugs', # 0x07
'peun', # 0x08
'peunj', # 0x09
'peunh', # 0x0a
'peud', # 0x0b
'peul', # 0x0c
'peulg', # 0x0d
'peulm', # 0x0e
'peulb', # 0x0f
'peuls', # 0x10
'peult', # 0x11
'peulp', # 0x12
'peulh', # 0x13
'peum', # 0x14
'peub', # 0x15
'peubs', # 0x16
'peus', # 0x17
'peuss', # 0x18
'peung', # 0x19
'peuj', # 0x1a
'peuc', # 0x1b
'peuk', # 0x1c
'peut', # 0x1d
'peup', # 0x1e
'peuh', # 0x1f
'pyi', # 0x20
'pyig', # 0x21
'pyigg', # 0x22
'pyigs', # 0x23
'pyin', # 0x24
'pyinj', # 0x25
'pyinh', # 0x26
'pyid', # 0x27
'pyil', # 0x28
'pyilg', # 0x29
'pyilm', # 0x2a
'pyilb', # 0x2b
'pyils', # 0x2c
'pyilt', # 0x2d
'pyilp', # 0x2e
'pyilh', # 0x2f
'pyim', # 0x30
'pyib', # 0x31
'pyibs', # 0x32
'pyis', # 0x33
'pyiss', # 0x34
'pying', # 0x35
'pyij', # 0x36
'pyic', # 0x37
'pyik', # 0x38
'pyit', # 0x39
'pyip', # 0x3a
'pyih', # 0x3b
'pi', # 0x3c
'pig', # 0x3d
'pigg', # 0x3e
'pigs', # 0x3f
'pin', # 0x40
'pinj', # 0x41
'pinh', # 0x42
'pid', # 0x43
'pil', # 0x44
'pilg', # 0x45
'pilm', # 0x46
'pilb', # 0x47
'pils', # 0x48
'pilt', # 0x49
'pilp', # 0x4a
'pilh', # 0x4b
'pim', # 0x4c
'pib', # 0x4d
'pibs', # 0x4e
'pis', # 0x4f
'piss', # 0x50
'ping', # 0x51
'pij', # 0x52
'pic', # 0x53
'pik', # 0x54
'pit', # 0x55
'pip', # 0x56
'pih', # 0x57
'ha', # 0x58
'hag', # 0x59
'hagg', # 0x5a
'hags', # 0x5b
'han', # 0x5c
'hanj', # 0x5d
'hanh', # 0x5e
'had', # 0x5f
'hal', # 0x60
'halg', # 0x61
'halm', # 0x62
'halb', # 0x63
'hals', # 0x64
'halt', # 0x65
'halp', # 0x66
'halh', # 0x67
'ham', # 0x68
'hab', # 0x69
'habs', # 0x6a
'has', # 0x6b
'hass', # 0x6c
'hang', # 0x6d
'haj', # 0x6e
'hac', # 0x6f
'hak', # 0x70
'hat', # 0x71
'hap', # 0x72
'hah', # 0x73
'hae', # 0x74
'haeg', # 0x75
'haegg', # 0x76
'haegs', # 0x77
'haen', # 0x78
'haenj', # 0x79
'haenh', # 0x7a
'haed', # 0x7b
'hael', # 0x7c
'haelg', # 0x7d
'haelm', # 0x7e
'haelb', # 0x7f
'haels', # 0x80
'haelt', # 0x81
'haelp', # 0x82
'haelh', # 0x83
'haem', # 0x84
'haeb', # 0x85
'haebs', # 0x86
'haes', # 0x87
'haess', # 0x88
'haeng', # 0x89
'haej', # 0x8a
'haec', # 0x8b
'haek', # 0x8c
'haet', # 0x8d
'haep', # 0x8e
'haeh', # 0x8f
'hya', # 0x90
'hyag', # 0x91
'hyagg', # 0x92
'hyags', # 0x93
'hyan', # 0x94
'hyanj', # 0x95
'hyanh', # 0x96
'hyad', # 0x97
'hyal', # 0x98
'hyalg', # 0x99
'hyalm', # 0x9a
'hyalb', # 0x9b
'hyals', # 0x9c
'hyalt', # 0x9d
'hyalp', # 0x9e
'hyalh', # 0x9f
'hyam', # 0xa0
'hyab', # 0xa1
'hyabs', # 0xa2
'hyas', # 0xa3
'hyass', # 0xa4
'hyang', # 0xa5
'hyaj', # 0xa6
'hyac', # 0xa7
'hyak', # 0xa8
'hyat', # 0xa9
'hyap', # 0xaa
'hyah', # 0xab
'hyae', # 0xac
'hyaeg', # 0xad
'hyaegg', # 0xae
'hyaegs', # 0xaf
'hyaen', # 0xb0
'hyaenj', # 0xb1
'hyaenh', # 0xb2
'hyaed', # 0xb3
'hyael', # 0xb4
'hyaelg', # 0xb5
'hyaelm', # 0xb6
'hyaelb', # 0xb7
'hyaels', # 0xb8
'hyaelt', # 0xb9
'hyaelp', # 0xba
'hyaelh', # 0xbb
'hyaem', # 0xbc
'hyaeb', # 0xbd
'hyaebs', # 0xbe
'hyaes', # 0xbf
'hyaess', # 0xc0
'hyaeng', # 0xc1
'hyaej', # 0xc2
'hyaec', # 0xc3
'hyaek', # 0xc4
'hyaet', # 0xc5
'hyaep', # 0xc6
'hyaeh', # 0xc7
'heo', # 0xc8
'heog', # 0xc9
'heogg', # 0xca
'heogs', # 0xcb
'heon', # 0xcc
'heonj', # 0xcd
'heonh', # 0xce
'heod', # 0xcf
'heol', # 0xd0
'heolg', # 0xd1
'heolm', # 0xd2
'heolb', # 0xd3
'heols', # 0xd4
'heolt', # 0xd5
'heolp', # 0xd6
'heolh', # 0xd7
'heom', # 0xd8
'heob', # 0xd9
'heobs', # 0xda
'heos', # 0xdb
'heoss', # 0xdc
'heong', # 0xdd
'heoj', # 0xde
'heoc', # 0xdf
'heok', # 0xe0
'heot', # 0xe1
'heop', # 0xe2
'heoh', # 0xe3
'he', # 0xe4
'heg', # 0xe5
'hegg', # 0xe6
'hegs', # 0xe7
'hen', # 0xe8
'henj', # 0xe9
'henh', # 0xea
'hed', # 0xeb
'hel', # 0xec
'helg', # 0xed
'helm', # 0xee
'helb', # 0xef
'hels', # 0xf0
'helt', # 0xf1
'help', # 0xf2
'helh', # 0xf3
'hem', # 0xf4
'heb', # 0xf5
'hebs', # 0xf6
'hes', # 0xf7
'hess', # 0xf8
'heng', # 0xf9
'hej', # 0xfa
'hec', # 0xfb
'hek', # 0xfc
'het', # 0xfd
'hep', # 0xfe
'heh', # 0xff
)
from nose.tools import eq_ as eq
import logging
from ConfigParser import RawConfigParser
from gitosis import access
def test_write_no_simple():
cfg = RawConfigParser()
eq(access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar'),
None)
def test_write_yes_simple():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'writable', 'foo/bar')
eq(access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar'),
('repositories', 'foo/bar'))
def test_write_no_simple_wouldHaveReadonly():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'readonly', 'foo/bar')
eq(access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar'),
None)
def test_write_yes_map():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map writable foo/bar', 'quux/thud')
eq(access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar'),
('repositories', 'quux/thud'))
def test_write_no_map_wouldHaveReadonly():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map readonly foo/bar', 'quux/thud')
eq(access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar'),
None)
def test_read_no_simple():
cfg = RawConfigParser()
eq(access.haveAccess(config=cfg, user='jdoe', mode='readonly', path='foo/bar'),
None)
def test_read_yes_simple():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'readonly', 'foo/bar')
eq(access.haveAccess(config=cfg, user='jdoe', mode='readonly', path='foo/bar'),
('repositories', 'foo/bar'))
def test_read_yes_simple_wouldHaveWritable():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'writable', 'foo/bar')
eq(access.haveAccess(config=cfg, user='jdoe', mode='readonly', path='foo/bar'),
None)
def test_read_yes_map():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map readonly foo/bar', 'quux/thud')
eq(access.haveAccess(config=cfg, user='jdoe', mode='readonly', path='foo/bar'),
('repositories', 'quux/thud'))
def test_read_yes_map_wouldHaveWritable():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map writable foo/bar', 'quux/thud')
eq(access.haveAccess(config=cfg, user='jdoe', mode='readonly', path='foo/bar'),
None)
def test_read_yes_all():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', '@all')
cfg.set('group fooers', 'readonly', 'foo/bar')
eq(access.haveAccess(config=cfg, user='jdoe', mode='readonly', path='foo/bar'),
('repositories', 'foo/bar'))
def test_base_global_absolute():
cfg = RawConfigParser()
cfg.add_section('gitosis')
cfg.set('gitosis', 'repositories', '/a/leading/path')
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map writable foo/bar', 'baz/quux/thud')
eq(access.haveAccess(
config=cfg, user='jdoe', mode='writable', path='foo/bar'),
('/a/leading/path', 'baz/quux/thud'))
def test_base_global_relative():
cfg = RawConfigParser()
cfg.add_section('gitosis')
cfg.set('gitosis', 'repositories', 'some/relative/path')
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map writable foo/bar', 'baz/quux/thud')
eq(access.haveAccess(
config=cfg, user='jdoe', mode='writable', path='foo/bar'),
('some/relative/path', 'baz/quux/thud'))
def test_base_global_relative_simple():
cfg = RawConfigParser()
cfg.add_section('gitosis')
cfg.set('gitosis', 'repositories', 'some/relative/path')
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'readonly', 'foo xyzzy bar')
eq(access.haveAccess(
config=cfg, user='jdoe', mode='readonly', path='xyzzy'),
('some/relative/path', 'xyzzy'))
def test_base_global_unset():
cfg = RawConfigParser()
cfg.add_section('gitosis')
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'readonly', 'foo xyzzy bar')
eq(access.haveAccess(
config=cfg, user='jdoe', mode='readonly', path='xyzzy'),
('repositories', 'xyzzy'))
def test_base_local():
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'repositories', 'some/relative/path')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'map writable foo/bar', 'baz/quux/thud')
eq(access.haveAccess(
config=cfg, user='jdoe', mode='writable', path='foo/bar'),
('some/relative/path', 'baz/quux/thud'))
def test_dotgit():
# a .git extension is always allowed to be added
cfg = RawConfigParser()
cfg.add_section('group fooers')
cfg.set('group fooers', 'members', 'jdoe')
cfg.set('group fooers', 'writable', 'foo/bar')
eq(access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar.git'),
('repositories', 'foo/bar'))
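# Illustrative sketch (not one of the test cases above, and not collected by
# nose): the programmatic RawConfigParser setup used in these tests maps onto
# an INI-style gitosis.conf such as the string below. The repository base path
# '/srv/git' is an invented example value.
def _example_haveAccess_from_ini():
    import StringIO
    example_conf = (
        "[gitosis]\n"
        "repositories = /srv/git\n"
        "\n"
        "[group fooers]\n"
        "members = jdoe\n"
        "writable = foo/bar\n"
    )
    cfg = RawConfigParser()
    cfg.readfp(StringIO.StringIO(example_conf))
    # expected to yield ('/srv/git', 'foo/bar') for a writable request by jdoe
    return access.haveAccess(config=cfg, user='jdoe', mode='writable', path='foo/bar')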
"""This test checks for correct wait3() behavior.
"""
import os
import time
from test.fork_wait import ForkWait
from test.test_support import TestSkipped, run_unittest, reap_children
try:
os.fork
except AttributeError:
raise TestSkipped, "os.fork not defined -- skipping test_wait3"
try:
os.wait3
except AttributeError:
raise TestSkipped, "os.wait3 not defined -- skipping test_wait3"
class Wait3Test(ForkWait):
def wait_impl(self, cpid):
for i in range(10):
# wait3() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status, rusage = os.wait3(os.WNOHANG)
if spid == cpid:
break
time.sleep(1.0)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
self.assertTrue(rusage)
def test_main():
run_unittest(Wait3Test)
reap_children()
if __name__ == "__main__":
test_main()
from distutils.command.install_scripts import install_scripts \
as _install_scripts
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(_install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
_install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
from setuptools.command.easy_install import get_script_args
from setuptools.command.easy_install import sys_executable
self.run_command("egg_info")
if self.distribution.scripts:
_install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target,0755)
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# System Imports
import sys
from twisted.trial import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.persisted import styles, aot, crefutil
class VersionTestCase(unittest.TestCase):
def testNullVersionUpgrade(self):
global NullVersioned
class NullVersioned:
ok = 0
pkcl = pickle.dumps(NullVersioned())
class NullVersioned(styles.Versioned):
persistenceVersion = 1
def upgradeToVersion1(self):
self.ok = 1
mnv = pickle.loads(pkcl)
styles.doUpgrade()
assert mnv.ok, "initial upgrade not run!"
def testVersionUpgrade(self):
global MyVersioned
class MyVersioned(styles.Versioned):
persistenceVersion = 2
persistenceForgets = ['garbagedata']
v3 = 0
v4 = 0
def __init__(self):
self.somedata = 'xxx'
self.garbagedata = lambda q: 'cant persist'
def upgradeToVersion3(self):
self.v3 += 1
def upgradeToVersion4(self):
self.v4 += 1
mv = MyVersioned()
assert not (mv.v3 or mv.v4), "hasn't been upgraded yet"
pickl = pickle.dumps(mv)
MyVersioned.persistenceVersion = 4
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3, "didn't do version 3 upgrade"
assert obj.v4, "didn't do version 4 upgrade"
pickl = pickle.dumps(obj)
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3 == 1, "upgraded unnecessarily"
assert obj.v4 == 1, "upgraded unnecessarily"
def testNonIdentityHash(self):
global ClassWithCustomHash
class ClassWithCustomHash(styles.Versioned):
def __init__(self, unique, hash):
self.unique = unique
self.hash = hash
def __hash__(self):
return self.hash
v1 = ClassWithCustomHash('v1', 0)
v2 = ClassWithCustomHash('v2', 0)
pkl = pickle.dumps((v1, v2))
del v1, v2
ClassWithCustomHash.persistenceVersion = 1
ClassWithCustomHash.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
v1, v2 = pickle.loads(pkl)
styles.doUpgrade()
self.assertEqual(v1.unique, 'v1')
self.assertEqual(v2.unique, 'v2')
self.failUnless(v1.upgraded)
self.failUnless(v2.upgraded)
def testUpgradeDeserializesObjectsRequiringUpgrade(self):
global ToyClassA, ToyClassB
class ToyClassA(styles.Versioned):
pass
class ToyClassB(styles.Versioned):
pass
x = ToyClassA()
y = ToyClassB()
pklA, pklB = pickle.dumps(x), pickle.dumps(y)
del x, y
ToyClassA.persistenceVersion = 1
def upgradeToVersion1(self):
self.y = pickle.loads(pklB)
styles.doUpgrade()
ToyClassA.upgradeToVersion1 = upgradeToVersion1
ToyClassB.persistenceVersion = 1
ToyClassB.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
x = pickle.loads(pklA)
styles.doUpgrade()
self.failUnless(x.y.upgraded)
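# Illustrative (non-test) sketch of the styles.Versioned pattern exercised
# above: bump persistenceVersion, provide an upgradeToVersionN method, and
# call styles.doUpgrade() after unpickling old instances. The class and
# attribute names here are invented for the example.
def _example_versioned_upgrade():
    global _ExampleRecord
    class _ExampleRecord(styles.Versioned):
        persistenceVersion = 1
        def __init__(self):
            self.name = 'example'
    blob = pickle.dumps(_ExampleRecord())
    # a later release bumps the version and supplies the upgrade step
    _ExampleRecord.persistenceVersion = 2
    _ExampleRecord.upgradeToVersion2 = lambda self: setattr(self, 'nickname', self.name)
    record = pickle.loads(blob)
    styles.doUpgrade()
    return record.nickname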
class VersionedSubClass(styles.Versioned):
pass
class SecondVersionedSubClass(styles.Versioned):
pass
class VersionedSubSubClass(VersionedSubClass):
pass
class VersionedDiamondSubClass(VersionedSubSubClass, SecondVersionedSubClass):
pass
class AybabtuTests(unittest.TestCase):
"""
L{styles._aybabtu} gets all of the classes in the inheritance hierarchy of its
argument that are strictly between L{Versioned} and the class itself.
"""
def test_aybabtuStrictEmpty(self):
"""
L{styles._aybabtu} of L{Versioned} itself is an empty list.
"""
self.assertEqual(styles._aybabtu(styles.Versioned), [])
def test_aybabtuStrictSubclass(self):
"""
There are no classes I{between} L{VersionedSubClass} and L{Versioned},
so L{styles._aybabtu} returns an empty list.
"""
self.assertEqual(styles._aybabtu(VersionedSubClass), [])
def test_aybabtuSubsubclass(self):
"""
With a sub-sub-class of L{Versioned}, L{styles._aybabtu} returns a list
containing the intervening subclass.
"""
self.assertEqual(styles._aybabtu(VersionedSubSubClass),
[VersionedSubClass])
def test_aybabtuStrict(self):
"""
For a diamond-shaped inheritance graph, L{styles._aybabtu} returns a
list containing I{both} intermediate subclasses.
"""
self.assertEqual(
styles._aybabtu(VersionedDiamondSubClass),
[VersionedSubSubClass, VersionedSubClass, SecondVersionedSubClass])
class MyEphemeral(styles.Ephemeral):
def __init__(self, x):
self.x = x
class EphemeralTestCase(unittest.TestCase):
def testEphemeral(self):
o = MyEphemeral(3)
self.assertEqual(o.__class__, MyEphemeral)
self.assertEqual(o.x, 3)
pickl = pickle.dumps(o)
o = pickle.loads(pickl)
self.assertEqual(o.__class__, styles.Ephemeral)
self.assert_(not hasattr(o, 'x'))
class Pickleable:
def __init__(self, x):
self.x = x
def getX(self):
return self.x
class A:
"""
dummy class
"""
def amethod(self):
pass
class B:
"""
dummy class
"""
def bmethod(self):
pass
def funktion():
pass
class PicklingTestCase(unittest.TestCase):
"""Test pickling of extra object types."""
def testModule(self):
pickl = pickle.dumps(styles)
o = pickle.loads(pickl)
self.assertEqual(o, styles)
def testClassMethod(self):
pickl = pickle.dumps(Pickleable.getX)
o = pickle.loads(pickl)
self.assertEqual(o, Pickleable.getX)
def testInstanceMethod(self):
obj = Pickleable(4)
pickl = pickle.dumps(obj.getX)
o = pickle.loads(pickl)
self.assertEqual(o(), 4)
self.assertEqual(type(o), type(obj.getX))
def testStringIO(self):
f = StringIO.StringIO()
f.write("abc")
pickl = pickle.dumps(f)
o = pickle.loads(pickl)
self.assertEqual(type(o), type(f))
self.assertEqual(f.getvalue(), "abc")
class EvilSourceror:
def __init__(self, x):
self.a = self
self.a.b = self
self.a.b.c = x
class NonDictState:
def __getstate__(self):
return self.state
def __setstate__(self, state):
self.state = state
class AOTTestCase(unittest.TestCase):
def testSimpleTypes(self):
obj = (1, 2.0, 3j, True, slice(1, 2, 3), 'hello', u'world', sys.maxint + 1, None, Ellipsis)
rtObj = aot.unjellyFromSource(aot.jellyToSource(obj))
self.assertEqual(obj, rtObj)
def testMethodSelfIdentity(self):
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
im_ = aot.unjellyFromSource(aot.jellyToSource(b)).a.bmethod
self.assertEqual(im_.im_class, im_.im_self.__class__)
def test_methodNotSelfIdentity(self):
"""
If a class changes after an instance has been created,
L{aot.unjellyFromSource} should raise a C{TypeError} when trying to
unjelly the instance.
"""
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
savedbmethod = B.bmethod
del B.bmethod
try:
self.assertRaises(TypeError, aot.unjellyFromSource,
aot.jellyToSource(b))
finally:
B.bmethod = savedbmethod
def test_unsupportedType(self):
"""
L{aot.jellyToSource} should raise a C{TypeError} when trying to jelly
an unknown type.
"""
try:
set
except:
from sets import Set as set
self.assertRaises(TypeError, aot.jellyToSource, set())
def testBasicIdentity(self):
# Anyone wanting to make this datastructure more complex, and thus this
# test more comprehensive, is welcome to do so.
aj = aot.AOTJellier().jellyToAO
d = {'hello': 'world', "method": aj}
l = [1, 2, 3,
"he\tllo\n\n\"x world!",
u"goodbye \n\t\u1010 world!",
1, 1.0, 100 ** 100l, unittest, aot.AOTJellier, d,
funktion
]
t = tuple(l)
l.append(l)
l.append(t)
l.append(t)
uj = aot.unjellyFromSource(aot.jellyToSource([l, l]))
assert uj[0] is uj[1]
assert uj[1][0:5] == l[0:5]
def testNonDictState(self):
a = NonDictState()
a.state = "meringue!"
assert aot.unjellyFromSource(aot.jellyToSource(a)).state == a.state
def testCopyReg(self):
s = "foo_bar"
sio = StringIO.StringIO()
sio.write(s)
uj = aot.unjellyFromSource(aot.jellyToSource(sio))
# print repr(uj.__dict__)
assert uj.getvalue() == s
def testFunkyReferences(self):
o = EvilSourceror(EvilSourceror([]))
j1 = aot.jellyToAOT(o)
oj = aot.unjellyFromAOT(j1)
assert oj.a is oj
assert oj.a.b is oj.b
assert oj.c is not oj.c.c
class CrefUtilTestCase(unittest.TestCase):
"""
Tests for L{crefutil}.
"""
def test_dictUnknownKey(self):
"""
L{crefutil._DictKeyAndValue} only support keys C{0} and C{1}.
"""
d = crefutil._DictKeyAndValue({})
self.assertRaises(RuntimeError, d.__setitem__, 2, 3)
def test_deferSetMultipleTimes(self):
"""
L{crefutil._Defer} can be assigned a key only one time.
"""
d = crefutil._Defer()
d[0] = 1
self.assertRaises(RuntimeError, d.__setitem__, 0, 1)
testCases = [VersionTestCase, EphemeralTestCase, PicklingTestCase]
# -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of the lowest-level Resource class.
"""
from __future__ import division, absolute_import
__all__ = [
'IResource', 'getChildForRequest',
'Resource', 'ErrorPage', 'NoResource', 'ForbiddenResource',
'EncodingResourceWrapper']
import warnings
from zope.interface import Attribute, Interface, implementer
from twisted.python.compat import nativeString, unicode
from twisted.python.reflect import prefixedMethodNames
from twisted.python.components import proxyForInterface
from twisted.web._responses import FORBIDDEN, NOT_FOUND
from twisted.web.error import UnsupportedMethod
class IResource(Interface):
"""
A web resource.
"""
isLeaf = Attribute(
"""
Signal if this IResource implementor is a "leaf node" or not. If True,
getChildWithDefault will not be called on this Resource.
""")
def getChildWithDefault(name, request):
"""
Return a child with the given name for the given request.
This is the external interface used by the Resource publishing
machinery. If implementing IResource without subclassing
Resource, it must be provided. However, if subclassing Resource,
getChild should be overridden instead.
@param name: A single path component from a requested URL. For example,
a request for I{http://example.com/foo/bar} will result in calls to
this method with C{b"foo"} and C{b"bar"} as values for this
argument.
@type name: C{bytes}
@param request: A representation of all of the information about the
request that is being made for this child.
@type request: L{twisted.web.server.Request}
"""
def putChild(path, child):
"""
Put a child IResource implementor at the given path.
@param path: A single path component, to be interpreted relative to the
path this resource is found at, at which to put the given child.
For example, if resource A can be found at I{http://example.com/foo}
then a call like C{A.putChild(b"bar", B)} will make resource B
available at I{http://example.com/foo/bar}.
@type path: C{bytes}
"""
def render(request):
"""
Render a request. This is called on the leaf resource for a request.
@return: Either C{server.NOT_DONE_YET} to indicate an asynchronous rendering, or a
C{bytes} instance to write as the response to the request. If
C{NOT_DONE_YET} is returned, at some point later (for example, in a
Deferred callback) call C{request.write(b"<html>")} to write data to
the request, and C{request.finish()} to send the data to the
browser.
@raise twisted.web.error.UnsupportedMethod: If the HTTP verb
requested is not supported by this resource.
"""
def getChildForRequest(resource, request):
"""
Traverse resource tree to find who will handle the request.
"""
while request.postpath and not resource.isLeaf:
pathElement = request.postpath.pop(0)
request.prepath.append(pathElement)
resource = resource.getChildWithDefault(pathElement, request)
return resource
@implementer(IResource)
class Resource:
"""
Define a web-accessible resource.
This serves 2 main purposes; one is to provide a standard representation
for what the HTTP specification calls an 'entity', and the other is to provide
an abstract directory structure for URL retrieval.
"""
entityType = IResource
server = None
def __init__(self):
"""
Initialize.
"""
self.children = {}
isLeaf = 0
### Abstract Collection Interface
def listStaticNames(self):
return list(self.children.keys())
def listStaticEntities(self):
return list(self.children.items())
def listNames(self):
return list(self.listStaticNames()) + self.listDynamicNames()
def listEntities(self):
return list(self.listStaticEntities()) + self.listDynamicEntities()
def listDynamicNames(self):
return []
def listDynamicEntities(self, request=None):
return []
def getStaticEntity(self, name):
return self.children.get(name)
def getDynamicEntity(self, name, request):
if not self.children.has_key(name):
return self.getChild(name, request)
else:
return None
def delEntity(self, name):
del self.children[name]
def reallyPutEntity(self, name, entity):
self.children[name] = entity
# Concrete HTTP interface
def getChild(self, path, request):
"""
Retrieve a 'child' resource from me.
Implement this to create dynamic resource generation -- resources which
are always available may be registered with self.putChild().
This will not be called if the class-level variable 'isLeaf' is set in
your subclass; instead, the 'postpath' attribute of the request will be
left as a list of the remaining path elements.
For example, the URL /foo/bar/baz will normally be::
| site.resource.getChild('foo').getChild('bar').getChild('baz').
However, if the resource returned by 'bar' has isLeaf set to true, then
the getChild call will never be made on it.
Parameters and return value have the same meaning and requirements as
those defined by L{IResource.getChildWithDefault}.
"""
return NoResource("No such child resource.")
def getChildWithDefault(self, path, request):
"""
Retrieve a static or dynamically generated child resource from me.
First checks if a resource was added manually by putChild, and then
calls getChild to check for dynamic resources. Only override if you want
to affect behaviour of all child lookups, rather than just dynamic
ones.
This will check to see if I have a pre-registered child resource of the
given name, and call getChild if I do not.
@see: L{IResource.getChildWithDefault}
"""
if path in self.children:
return self.children[path]
return self.getChild(path, request)
def getChildForRequest(self, request):
warnings.warn("Please use module level getChildForRequest.", DeprecationWarning, 2)
return getChildForRequest(self, request)
def putChild(self, path, child):
"""
Register a static child.
You almost certainly don't want '/' in your path. If you
intended to have the root of a folder, e.g. /foo/, you want
path to be ''.
@see: L{IResource.putChild}
"""
self.children[path] = child
child.server = self.server
def render(self, request):
"""
Render a given resource. See L{IResource}'s render method.
I delegate to methods of self with the form 'render_METHOD'
where METHOD is the HTTP method that was used to make the
request. Examples: render_GET, render_HEAD, render_POST, and
so on. Generally you should implement those methods instead of
overriding this one.
render_METHOD methods are expected to return a byte string which will be
the rendered page, unless the return value is C{server.NOT_DONE_YET}, in
which case it is this class's responsibility to write the results using
C{request.write(data)} and then call C{request.finish()}.
Old code that overrides render() directly is likewise expected
to return a byte string or NOT_DONE_YET.
@see: L{IResource.render}
"""
m = getattr(self, 'render_' + nativeString(request.method), None)
if not m:
try:
allowedMethods = self.allowedMethods
except AttributeError:
allowedMethods = _computeAllowedMethods(self)
raise UnsupportedMethod(allowedMethods)
return m(request)
def render_HEAD(self, request):
"""
Default handling of HEAD method.
I just return self.render_GET(request). When method is HEAD,
the framework will handle this correctly.
"""
return self.render_GET(request)
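# Illustrative sketch (not part of this module): a minimal static resource
# tree built on the Resource API documented above. The child name and the
# greeting text are invented for the example.
class _ExampleGreeting(Resource):
    """
    Render a fixed byte string in response to GET requests.
    """
    isLeaf = True
    def render_GET(self, request):
        return b"hello from an example resource"
def _buildExampleRoot():
    root = Resource()
    # putChild registers a static child; getChildWithDefault will find it
    # without ever calling getChild.
    root.putChild(b"greeting", _ExampleGreeting())
    return root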
def _computeAllowedMethods(resource):
"""
Compute the allowed methods on a C{Resource} based on defined render_FOO
methods. Used when raising C{UnsupportedMethod} but C{Resource} does
not define C{allowedMethods} attribute.
"""
allowedMethods = []
for name in prefixedMethodNames(resource.__class__, "render_"):
# Potentially there should be an API for encode('ascii') in this
# situation - an API for taking a Python native string (bytes on Python
# 2, text on Python 3) and returning a socket-compatible string type.
allowedMethods.append(name.encode('ascii'))
return allowedMethods
class ErrorPage(Resource):
"""
L{ErrorPage} is a resource which responds with a particular
(parameterized) status and a body consisting of HTML containing some
descriptive text. This is useful for rendering simple error pages.
@ivar template: A native string which will have a dictionary interpolated
into it to generate the response body. The dictionary has the following
keys:
- C{"code"}: The status code passed to L{ErrorPage.__init__}.
- C{"brief"}: The brief description passed to L{ErrorPage.__init__}.
- C{"detail"}: The detailed description passed to
L{ErrorPage.__init__}.
@ivar code: An integer status code which will be used for the response.
@ivar brief: A short string which will be included in the response body.
@type brief: C{str}
@ivar detail: A longer string which will be included in the response body.
@type detail: C{str}
"""
template = """
<html>
<head><title>%(code)s - %(brief)s</title></head>
<body>
<h1>%(brief)s</h1>
<p>%(detail)s</p>
</body>
</html>
"""
def __init__(self, status, brief, detail):
Resource.__init__(self)
self.code = status
self.brief = brief
self.detail = detail
def render(self, request):
request.setResponseCode(self.code)
request.setHeader(b"content-type", b"text/html; charset=utf-8")
interpolated = self.template % dict(
code=self.code, brief=self.brief, detail=self.detail)
if isinstance(interpolated, unicode):
return interpolated.encode('utf-8')
return interpolated
def getChild(self, chnam, request):
return self
class NoResource(ErrorPage):
"""
L{NoResource} is a specialization of L{ErrorPage} which returns the HTTP
response code I{NOT FOUND}.
"""
def __init__(self, message="Sorry. No luck finding that resource."):
ErrorPage.__init__(self, NOT_FOUND, "No Such Resource", message)
class ForbiddenResource(ErrorPage):
"""
L{ForbiddenResource} is a specialization of L{ErrorPage} which returns the
I{FORBIDDEN} HTTP response code.
"""
def __init__(self, message="Sorry, resource is forbidden."):
ErrorPage.__init__(self, FORBIDDEN, "Forbidden Resource", message)
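# Illustrative sketch: ErrorPage and its specializations are typically
# returned from getChild() for unknown or disallowed paths. The path name
# and messages below are invented for the example.
class _ExampleGatedResource(Resource):
    def getChild(self, path, request):
        if path == b"private":
            return ForbiddenResource("This example path is off limits.")
        return NoResource("No such example page.")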
class _IEncodingResource(Interface):
"""
A resource which knows about L{_IRequestEncoderFactory}.
@since: 12.3
"""
def getEncoder(request):
"""
Parse the request and return an encoder if applicable, using
L{_IRequestEncoderFactory.encoderForRequest}.
@return: A L{_IRequestEncoder}, or C{None}.
"""
@implementer(_IEncodingResource)
class EncodingResourceWrapper(proxyForInterface(IResource)):
"""
Wrap a L{IResource}, potentially applying an encoding to the response body
generated.
Note that the returned children resources won't be wrapped, so you have to
explicitly wrap them if you want the encoding to be applied.
@ivar encoders: A list of
L{_IRequestEncoderFactory}
returning L{_IRequestEncoder} that
may transform the data passed to C{Request.write}. The list must be
sorted in order of priority: the first encoder factory handling the
request will prevent the others from doing the same.
@type encoders: C{list}.
@since: 12.3
"""
def __init__(self, original, encoders):
super(EncodingResourceWrapper, self).__init__(original)
self._encoders = encoders
def getEncoder(self, request):
"""
Browse the list of encoders looking for an applicable encoder.
"""
for encoderFactory in self._encoders:
encoder = encoderFactory.encoderForRequest(request)
if encoder is not None:
return encoder
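# Illustrative sketch: wrapping an already-built root resource so responses
# may be gzip-encoded. L{twisted.web.server.GzipEncoderFactory} is the stock
# encoder factory; the wrapped resource is whatever root the caller supplies.
def _exampleEncodedRoot(root):
    from twisted.web.server import GzipEncoderFactory
    # encoder factories are consulted in order; the first one that returns
    # an encoder for the request wins.
    return EncodingResourceWrapper(root, [GzipEncoderFactory()])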
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
#
# mod_python tests
from mod_python.python22 import *
from mod_python import apache
import sys
import unittest
import re
import time
import os
import io
PY2 = sys.version[0] == '2'
# This is used for mod_python.publisher security tests
_SECRET_PASSWORD = 'root'
__ANSWER = 42
class SimpleTestCase(unittest.TestCase):
def __init__(self, methodName, req):
unittest.TestCase.__init__(self, methodName)
self.req = req
def test_apache_log_error(self):
s = self.req.server
c = self.req.connection
apache.log_error("Testing apache.log_error():", apache.APLOG_INFO, s)
apache.log_error("xEMERGx", apache.APLOG_EMERG, s)
apache.log_error("xALERTx", apache.APLOG_ALERT, s)
apache.log_error("xCRITx", apache.APLOG_CRIT, s)
apache.log_error("xERRx", apache.APLOG_ERR, s)
apache.log_error("xWARNINGx", apache.APLOG_WARNING, s)
apache.log_error("xNOTICEx", apache.APLOG_NOTICE, s)
apache.log_error("xINFOx", apache.APLOG_INFO, s)
apache.log_error("xDEBUGx", apache.APLOG_DEBUG, s)
s.log_error("xEMERGx", apache.APLOG_EMERG)
s.log_error("xALERTx", apache.APLOG_ALERT)
s.log_error("xCRITx", apache.APLOG_CRIT)
s.log_error("xERRx", apache.APLOG_ERR)
s.log_error("xWARNINGx", apache.APLOG_WARNING)
s.log_error("xNOTICEx", apache.APLOG_NOTICE)
s.log_error("xINFOx", apache.APLOG_INFO)
s.log_error("xDEBUGx", apache.APLOG_DEBUG)
c.log_error("xEMERGx", apache.APLOG_EMERG)
c.log_error("xALERTx", apache.APLOG_ALERT)
c.log_error("xCRITx", apache.APLOG_CRIT)
c.log_error("xERRx", apache.APLOG_ERR)
c.log_error("xWARNINGx", apache.APLOG_WARNING)
c.log_error("xNOTICEx", apache.APLOG_NOTICE)
c.log_error("xINFOx", apache.APLOG_INFO)
c.log_error("xDEBUGx", apache.APLOG_DEBUG)
# see what's in the log now
f = open("%s/logs/error_log" % apache.server_root())
# for some reason re doesn't like \n, why?
log = "".join(map(str.strip, f.readlines()))
f.close()
if not re.search("xEMERGx.*xALERTx.*xCRITx.*xERRx.*xWARNINGx.*xNOTICEx.*xINFOx.*xDEBUGx.*xEMERGx.*xALERTx.*xCRITx.*xERRx.*xWARNINGx.*xNOTICEx.*xINFOx.*xDEBUGx.*xEMERGx.*xALERTx.*xCRITx.*xERRx.*xWARNINGx.*xNOTICEx.*xINFOx.*xDEBUGx", log):
self.fail("Could not find test messages in error_log")
def test_apache_table(self):
log = self.req.log_error
log("Testing table object.")
# tests borrowed from Python test suite for dict
_test_table()
# inheritance
log(" inheritance")
class mytable(apache.table):
def __str__(self):
return "str() from mytable"
mt = mytable({'a':'b'})
# add()
log(" table.add()")
a = apache.table({'a':'b'})
a.add('a', 'c')
if a['a'] != ['b', 'c']:
self.fail('table.add() broken: a["a"] is %s' % repr(a["a"]))
log("Table test DONE.")
def test_req_add_common_vars(self):
self.req.log_error("Testing req.add_common_vars().")
a = len(self.req.subprocess_env)
self.req.add_common_vars()
b = len(self.req.subprocess_env)
if a >= b:
self.fail("req.subprocess_env() is same size before and after")
def test_req_add_cgi_vars(self):
self.req.log_error("Testing req.add_cgi_vars().")
a = len(self.req.subprocess_env)
self.req.add_cgi_vars()
b = len(self.req.subprocess_env)
if a >= b:
self.fail("req.subprocess_env() is same size before and after")
def test_req_members(self):
# just run through request members making sure
# they make sense
req = self.req
log = req.log_error
log("Examining request memebers:")
log(" req.connection: %s" % repr(req.connection))
s = str(type(req.connection))
if s not in ("", ""):
self.fail("strange req.connection type %s" % repr(s))
log(" req.server: '%s'" % repr(req.server))
s = str(type(req.server))
if s not in ("", ""):
self.fail("strange req.server type %s" % repr(s))
for x in ((req.next, "next"),
(req.prev, "prev"),
(req.main, "main")):
val, name = x
log(" req.%s: '%s'" % (name, repr(val)))
if val:
self.fail("strange, req.%s should be None, not %s" % (name, repr(val)))
log(" req.the_request: '%s'" % req.the_request)
if not re.match(r"GET /.* HTTP/1\.", req.the_request):
self.fail("strange req.the_request %s" % repr(req.the_request))
for x in ((req.assbackwards, "assbackwards"),
(req.proxyreq, "proxyreq"),
(req.header_only, "header_only")):
val, name = x
log(" req.%s: %s" % (name, repr(val)))
if val:
self.fail("%s should be 0" % name)
log(" req.protocol: %s" % repr(req.protocol))
if not req.protocol == req.the_request.split()[-1]:
self.fail("req.protocol doesn't match req.the_request")
log(" req.proto_num: %s" % repr(req.proto_num))
if req.proto_num != 1000 + int(req.protocol[-1]):
self.fail("req.proto_num doesn't match req.protocol")
log(" req.hostname: %s" % repr(req.hostname))
if req.hostname != "test_internal":
self.fail("req.hostname isn't 'test_internal'")
log(" req.request_time: %s" % repr(req.request_time))
if (time.time() - req.request_time) > 10:
self.fail("req.request_time suggests request started more than 10 secs ago")
log(" req.status_line: %s" % repr(req.status_line))
if req.status_line:
self.fail("req.status_line should be None at this point")
log(" req.status: %s" % repr(req.status))
if req.status != 200:
self.fail("req.status should be 200")
req.status = req.status # make sure its writable
log(" req.method: %s" % repr(req.method))
if req.method != "GET":
self.fail("req.method should be 'GET'")
log(" req.method_number: %s" % repr(req.method_number))
if req.method_number != 0:
self.fail("req.method_number should be 0")
log(" req.allowed: %s" % repr(req.allowed))
if req.allowed != 0:
self.fail("req.allowed should be 0")
log(" req.allowed_xmethods: %s" % repr(req.allowed_xmethods))
if req.allowed_xmethods != ():
self.fail("req.allowed_xmethods should be an empty tuple")
log(" req.allowed_methods: %s" % repr(req.allowed_methods))
if req.allowed_methods != ():
self.fail("req.allowed_methods should be an empty tuple")
log(" req.sent_bodyct: %s" % repr(req.sent_bodyct))
if req.sent_bodyct != 0:
self.fail("req.sent_bodyct should be 0")
log(" req.bytes_sent: %s" % repr(req.bytes_sent))
save = req.bytes_sent
log(" writing 4 bytes...")
req.write("1234")
log(" req.bytes_sent: %s" % repr(req.bytes_sent))
if req.bytes_sent - save != 4:
self.fail("req.bytes_sent should have incremented by 4, but didn't")
log(" req.mtime: %s" % repr(req.mtime))
if req.mtime != 0:
self.fail("req.mtime should be 0")
log(" req.chunked: %s" % repr(req.chunked))
if req.chunked != 1:
self.fail("req.chunked should be 1")
log(" req.range: %s" % repr(req.range))
if req.range:
self.fail("req.range should be None")
log(" req.clength: %s" % repr(req.clength))
log(" calling req.set_content_length(15)...")
req.set_content_length(15)
log(" req.clength: %s" % repr(req.clength))
if req.clength != 15:
self.fail("req.clength should be 15")
log(" req.remaining: %s" % repr(req.remaining))
if req.remaining != 0:
self.fail("req.remaining should be 0")
log(" req.read_length: %s" % repr(req.read_length))
if req.read_length != 0:
self.fail("req.read_length should be 0")
log(" req.read_body: %s" % repr(req.read_body))
if req.read_body != 0:
self.fail("req.read_body should be 0")
log(" req.read_chunked: %s" % repr(req.read_chunked))
if req.read_chunked != 0:
self.fail("req.read_chunked should be 0")
log(" req.expecting_100: %s" % repr(req.expecting_100))
if req.expecting_100 != 0:
self.fail("req.expecting_100 should be 0")
log(" req.headers_in: %s" % repr(req.headers_in))
if req.headers_in["Host"][:13].lower() != "test_internal":
self.fail("The 'Host' header should begin with 'test_internal'")
log(" req.headers_out: %s" % repr(req.headers_out))
if (("content-length" not in req.headers_out) or
req.headers_out["content-length"] != "15"):
self.fail("req.headers_out['content-length'] should be 15")
log(" req.subprocess_env: %s" % repr(req.subprocess_env))
if req.subprocess_env["SERVER_SOFTWARE"].find("Python") == -1:
self.fail("req.subprocess_env['SERVER_SOFTWARE'] should contain 'Python'")
log(" req.notes: %s" % repr(req.notes))
log(" doing req.notes['testing'] = '123' ...")
req.notes['testing'] = '123'
log(" req.notes: %s" % repr(req.notes))
if req.notes["testing"] != '123':
self.fail("req.notes['testing'] should be '123'")
log(" req.phase: %s" % repr(req.phase))
if req.phase != "PythonHandler":
self.fail("req.phase should be 'PythonHandler'")
log(" req.interpreter: %s" % repr(req.interpreter))
if req.interpreter != apache.interpreter:
self.fail("req.interpreter should be same as apache.interpreter" % repr(apache.interpreter))
if req.interpreter != req.server.server_hostname:
self.fail("req.interpreter should be same as req.server.server_hostname: %s" % repr(req.server.server_hostname))
log(" req.content_type: %s" % repr(req.content_type))
log(" doing req.content_type = 'test/123' ...")
req.content_type = 'test/123'
log(" req.content_type: %s" % repr(req.content_type))
if req.content_type != 'test/123' or not req._content_type_set:
self.fail("req.content_type should be 'test/123' and req._content_type_set 1")
log(" req.handler: %s" % repr(req.handler))
if req.handler != "mod_python":
self.fail("req.handler should be 'mod_python'")
log(" req.content_encoding: %s" % repr(req.content_encoding))
if req.content_encoding:
self.fail("req.content_encoding should be None")
log(" req.content_languages: %s" % repr(req.content_languages))
if req.content_languages != ():
self.fail("req.content_languages should be an empty tuple")
log(" req.vlist_validator: %s" % repr(req.vlist_validator))
if req.vlist_validator:
self.fail("req.vlist_validator should be None")
log(" req.user: %s" % repr(req.user))
if req.user:
self.fail("req.user should be None")
log(" req.ap_auth_type: %s" % repr(req.ap_auth_type))
if req.ap_auth_type:
self.fail("req.ap_auth_type should be None")
log(" req.no_cache: %s" % repr(req.no_cache))
if req.no_cache != 0:
self.fail("req.no_cache should be 0")
log(" req.no_local_copy: %s" % repr(req.no_local_copy))
if req.no_local_copy != 0:
self.fail("req.no_local_copy should be 0")
log(" req.unparsed_uri: %s" % repr(req.unparsed_uri))
if req.unparsed_uri != "/tests.py":
self.fail("req.unparsed_uri should be '/tests.py'")
log(" req.uri: %s" % repr(req.uri))
if req.uri != "/tests.py":
self.fail("req.uri should be '/tests.py'")
log(" req.filename: %s" % repr(req.filename))
if req.filename != req.document_root() + req.uri:
self.fail("req.filename should be req.document_root() + req.uri, but it isn't")
log(" req.canonical_filename: %s" % repr(req.canonical_filename))
if not req.canonical_filename:
self.fail("req.canonical_filename should not be blank")
log(" req.path_info: %s" % repr(req.path_info))
if req.path_info != '':
self.fail("req.path_info should be ''")
log(" req.args: %s" % repr(req.args))
if req.args:
self.fail("req.args should be None")
log(" req.finfo: %s" % repr(req.finfo))
if req.finfo[apache.FINFO_FNAME] and (req.finfo[apache.FINFO_FNAME] != req.canonical_filename):
self.fail("req.finfo[apache.FINFO_FNAME] should be the (canonical) filename")
log(" req.parsed_uri: %s" % repr(req.parsed_uri))
if req.parsed_uri[apache.URI_PATH] != '/tests.py':
self.fail("req.parsed_uri[apache.URI_PATH] should be '/tests.py'")
log(" req.used_path_info: %s" % repr(req.used_path_info))
if req.used_path_info != 2:
self.fail("req.used_path_info should be 2") # XXX really? :-)
log(" req.eos_sent: %s" % repr(req.eos_sent))
if req.eos_sent:
self.fail("req.eos_sent says we sent EOS, but we didn't")
if apache.MODULE_MAGIC_NUMBER_MAJOR > 20111130:
try:
import socket
localip = socket.gethostbyname("localhost")
except:
localip = "127.0.0.1"
log(" req.useragent_ip: %s" % repr(req.useragent_ip))
if not req.useragent_ip in ("127.0.0.1", localip):
self.fail("req.useragent_ip should be '127.0.0.1'")
log(" req.useragent_addr: %s" % repr(req.useragent_addr))
if not req.useragent_addr[0] in ("127.0.0.1", "0.0.0.0", localip):
self.fail("req.useragent_addr[0] should be '127.0.0.1' or '0.0.0.0'")
def test_req_get_config(self):
req = self.req
log = req.log_error
log("req.get_config(): %s" % repr(req.get_config()))
if req.get_config()["PythonDebug"] != "1":
self.fail("get_config return should show PythonDebug 1")
log("req.get_options(): %s" % repr(req.get_options()))
if req.get_options() != apache.table({"testing":"123"}):
self.fail("get_options() should contain 'testing':'123', contains %s"%list(req.get_options().items()))
def test_req_get_remote_host(self):
# simulating this test for real is too complex...
req = self.req
log = req.log_error
log("req.get_get_remote_host(): %s" % repr(req.get_remote_host(apache.REMOTE_HOST)))
log("req.get_get_remote_host(): %s" % repr(req.get_remote_host()))
if (req.get_remote_host(apache.REMOTE_HOST) != None) or \
(req.get_remote_host() != "127.0.0.1"):
self.fail("remote host test failed: %s" % req.get_remote_host())
def test_server_members(self):
req = self.req
log = req.log_error
server = req.server
log("Examining server memebers:")
log(" server.defn_name: %s" % repr(server.defn_name))
if server.defn_name[-9:] != "test.conf":
self.fail("server.defn_name does not end in 'test.conf'")
log(" server.defn_line_number: %s" % repr(server.defn_line_number))
if server.defn_line_number == 0:
self.fail("server.defn_line_number should not be 0")
log(" server.server_admin: %s" % repr(server.server_admin))
if server.server_admin != "serveradmin@somewhere.com":
self.fail("server.server_admin must be 'serveradmin@somewhere.com'")
log(" server.server_hostname: %s" % repr(server.server_hostname))
if server.server_hostname != "test_internal":
self.fail("server.server_hostname must be 'test_internal'")
log(" server.port: %s" % repr(server.port))
# hmm it really is 0...
#if server.port == 0:
# self.fail("server.port should not be 0")
log(" server.error_fname: %s" % repr(server.error_fname))
if server.error_fname != "logs/error_log":
self.fail("server.error_fname should be 'logs/error_log'")
log(" server.loglevel: %s" % repr(server.loglevel))
if server.loglevel != 7:
self.fail("server.loglevel should be 7")
log(" server.is_virtual: %s" % repr(server.is_virtual))
if server.is_virtual != 1:
self.fail("server.is_virtual should be 1")
log(" server.timeout: %s" % repr(server.timeout))
if not server.timeout in (5.0, 60.0):
self.fail("server.timeout should be 5.0 or 60.0")
log(" server.keep_alive_timeout: %s" % repr(server.keep_alive_timeout))
if server.keep_alive_timeout != 15.0:
self.fail("server.keep_alive_timeout should be 15.0")
log(" server.keep_alive_max: %s" % repr(server.keep_alive_max))
if server.keep_alive_max != 100:
self.fail("server.keep_alive_max should be 100")
log(" server.keep_alive: %s" % repr(server.keep_alive))
if server.keep_alive != 1:
self.fail("server.keep_alive should be 1")
log(" server.path: %s" % repr(server.path))
if server.path != "some/path":
self.fail("server.path should be 'some/path'")
log(" server.pathlen: %s" % repr(server.pathlen))
if server.pathlen != len('some/path'):
self.fail("server.pathlen should be %d" % len('some/path'))
log(" server.limit_req_line: %s" % repr(server.limit_req_line))
if server.limit_req_line != 8190:
self.fail("server.limit_req_line should be 8190")
log(" server.limit_req_fieldsize: %s" % repr(server.limit_req_fieldsize))
if server.limit_req_fieldsize != 8190:
self.fail("server.limit_req_fieldsize should be 8190")
log(" server.limit_req_fields: %s" % repr(server.limit_req_fields))
if server.limit_req_fields != 100:
self.fail("server.limit_req_fields should be 100")
log(" server.names: %s" % repr(server.names))
if server.names != ():
self.fail("server.names should be an empty tuple")
log(" server.wild_names: %s" % repr(server.wild_names))
if server.wild_names != ():
self.fail("server.wild_names should be an empty tuple")
def test_connection_members(self):
req = self.req
log = req.log_error
conn = req.connection
try:
import socket
localip = socket.gethostbyname("localhost")
except:
localip = "127.0.0.1"
log("Examining connection memebers:")
log(" connection.base_server: %s" % repr(conn.base_server))
if type(conn.base_server) is not type(req.server):
self.fail("conn.base_server should be same type as req.server")
log(" connection.local_addr: %s" % repr(conn.local_addr))
if not conn.local_addr[0] in ("127.0.0.1", "0.0.0.0", localip):
self.fail("conn.local_addr[0] should be '127.0.0.1' or '0.0.0.0'")
if apache.MODULE_MAGIC_NUMBER_MAJOR > 20111130:
log(" connection.client_addr: %s" % repr(conn.client_addr))
if not conn.client_addr[0] in ("127.0.0.1", "0.0.0.0", localip):
self.fail("conn.client_addr[0] should be '127.0.0.1' or '0.0.0.0'")
log(" connection.client_ip: %s" % repr(conn.client_ip))
if not conn.client_ip in ("127.0.0.1", localip):
self.fail("conn.client_ip should be '127.0.0.1'")
else:
log(" connection.remote_addr: %s" % repr(conn.remote_addr))
if not conn.remote_addr[0] in ("127.0.0.1", "0.0.0.0", localip):
self.fail("conn.remote_addr[0] should be '127.0.0.1' or '0.0.0.0'")
log(" connection.remote_ip: %s" % repr(conn.remote_ip))
if not conn.remote_ip in ("127.0.0.1", localip):
self.fail("conn.remote_ip should be '127.0.0.1'")
log(" connection.remote_host: %s" % repr(conn.remote_host))
if conn.remote_host is not None:
self.fail("conn.remote_host should be None")
log(" connection.remote_logname: %s" % repr(conn.remote_logname))
if conn.remote_logname is not None:
self.fail("conn.remote_logname should be None")
log(" connection.aborted: %s" % repr(conn.aborted))
if conn.aborted != 0:
self.fail("conn.aborted should be 0")
log(" connection.keepalive: %s" % repr(conn.keepalive))
if conn.keepalive != 2:
self.fail("conn.keepalive should be 2")
log(" connection.double_reverse: %s" % repr(conn.double_reverse))
if conn.double_reverse != 0:
self.fail("conn.double_reverse should be 0")
log(" connection.keepalives: %s" % repr(conn.keepalives))
if conn.keepalives != 1:
self.fail("conn.keepalives should be 1")
log(" connection.local_ip: %s" % repr(conn.local_ip))
if not conn.local_ip in ("127.0.0.1", localip):
self.fail("conn.local_ip should be '127.0.0.1'")
log(" connection.local_host: %s" % repr(conn.local_host))
if conn.local_host is not None:
self.fail("conn.local_host should be None")
log(" connection.id: %s" % repr(conn.id))
if conn.id > 10000:
self.fail("conn.id probably should not be this high?")
log(" connection.notes: %s" % repr(conn.notes))
if repr(conn.notes) != '{}':
self.fail("conn.notes should be {}")
def make_suite(req):
mpTestSuite = unittest.TestSuite()
mpTestSuite.addTest(SimpleTestCase("test_apache_log_error", req))
mpTestSuite.addTest(SimpleTestCase("test_apache_table", req))
# NB: add_common_vars must be before cgi_vars
mpTestSuite.addTest(SimpleTestCase("test_req_add_common_vars", req))
mpTestSuite.addTest(SimpleTestCase("test_req_add_cgi_vars", req))
mpTestSuite.addTest(SimpleTestCase("test_req_members", req))
mpTestSuite.addTest(SimpleTestCase("test_req_get_config", req))
mpTestSuite.addTest(SimpleTestCase("test_req_get_remote_host", req))
mpTestSuite.addTest(SimpleTestCase("test_server_members", req))
mpTestSuite.addTest(SimpleTestCase("test_connection_members", req))
return mpTestSuite
def handler(req):
if PY2:
out = io.BytesIO()
else:
out = io.StringIO()
tr = unittest.TextTestRunner(out)
result = tr.run(make_suite(req))
req.log_error(out.getvalue())
if result.wasSuccessful():
req.write("test ok")
else:
req.write("test failed")
return apache.OK
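# Illustrative sketch (not wired into the test suite): the minimal shape a
# mod_python handler in this module takes is just a function that accepts the
# request object, writes a response, and returns an apache status code; the
# response text below is an invented example.
def _example_minimal_handler(req):
    req.content_type = 'text/plain'
    req.write('hello from an example handler')
    return apache.OK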
def req_add_handler(req):
req.secret_message = "foo"
req.add_handler("PythonHandler", "tests::simple_handler")
return apache.OK
def simple_handler(req):
# for req_add_handler()
if (req.secret_message == "foo"):
req.write("test ok")
return apache.OK
def req_add_bad_handler(req):
# bad_handler does not exist, so adding it
# should raise an AttributeError exception
req.log_error("req_add_bad_handler " + req.hlist.handler)
req.add_handler("PythonHandler", "tests::bad_handler")
req.log_error("req_add_bad_handler " + req.hlist.handler)
req.write("test ok")
return apache.OK
def req_add_empty_handler_string(req):
# Adding an empty string as a handler
# should raise an exception
req.log_error("req_add_empty_handler_string")
req.add_handler("PythonHandler", "")
req.write("no exception")
return apache.OK
def req_add_handler_empty_phase(req):
req.log_error("req_add_handler_empty_phase")
req.log_error("phase=%s" % req.phase)
req.log_error("interpreter=%s" % req.interpreter)
req.log_error("directory=%s" % req.hlist.directory)
if req.phase != "PythonHandler":
directory = os.path.dirname(__file__)
req.add_handler("PythonHandler", "tests::req_add_handler_empty_phase", directory)
else:
req.write("test ok")
return apache.OK
def accesshandler_add_handler_to_empty_hl(req):
# Prior to version 3.2.6, adding a python handler
# to an empty handler list would cause a segfault
req.secret_message = "foo"
req.log_error("accesshandler_add_handler_to_empty_hl")
req.add_handler("PythonHandler", "tests::simple_handler")
return apache.OK
def test_req_add_handler_directory(req):
# dir1 will not have a trailing slash and on Win32
# will use back slashes and not forward slashes.
dir1 = os.path.dirname(__file__)
if req.phase == "PythonFixupHandler":
req.add_handler("PythonHandler", "tests::test_req_add_handler_directory", dir1)
else:
# dir2 should only use forward slashes and
# should have a trailing forward slash added by
# call to req.add_handler(). When dir1 and dir2
# are normalised for current operating system,
# they should be equivalent.
dir2 = req.hlist.directory
if dir2[-1] != '/' or dir2.count('\\') != 0:
req.write('test failed')
else:
dir1 = os.path.normpath(dir1)
dir2 = os.path.normpath(dir2)
if dir2 != dir1:
req.write('test failed')
else:
req.write('test ok')
return apache.OK
def req_allow_methods(req):
req.allow_methods(["PYTHONIZE"])
return apache.HTTP_METHOD_NOT_ALLOWED
def req_get_basic_auth_pw(req):
LATIN1_SPAM = 'sp\xe1m'
LATIN1_EGGS = '\xe9ggs'
pw = req.get_basic_auth_pw()
if (req.user == "spam" and pw == "eggs" or
req.user == LATIN1_SPAM and pw == LATIN1_EGGS):
req.write("test ok")
else:
req.write("test failed, user %s, pw %s" % (repr(req.user), repr(pw)))
return apache.OK
def req_unauthorized(req):
pw = req.get_basic_auth_pw()
if req.user == "spam" and pw == "eggs":
req.write("test ok")
return apache.OK
return apache.HTTP_UNAUTHORIZED
def req_auth_type(req):
auth_type = req.auth_type()
if auth_type != "dummy":
req.log_error("auth_type check failed")
req.write("test failed, req.auth_type() returned: %s" % repr(auth_type))
return apache.DONE
auth_name = req.auth_name()
if auth_name != "blah":
req.log_error("auth_name check failed")
req.write("test failed, req.auth_name() returned: %s" % repr(auth_name))
return apache.DONE
if req.phase == "PythonAuthenHandler":
req.user = "dummy"
req.ap_auth_type = req.auth_type()
elif req.phase != "PythonAuthzHandler":
req.write("test ok")
return apache.OK
def req_requires(req):
if req.requires() == ('valid-user',):
req.write("test ok")
return apache.DONE
req.write("test failed")
return apache.DONE
def req_document_root(req):
req.write(req.document_root())
return apache.OK
def req_internal_redirect(req):
req.internal_redirect("/test.int")
return apache.OK
def req_internal_redirect_int(req):
# used by req_internal_redirect
req.prev.write("test ")
req.write("ok")
return apache.OK
def req_construct_url(req):
url = req.construct_url("/index.html")
if not re.match("^http://test_req_construct_url:[0-9]+/index.html$",url):
req.write("test failed")
else:
req.write("test ok")
return apache.OK
def req_read(req):
s = req.read()
req.write(s)
return apache.OK
def req_readline(req):
s = req.readline()
while s:
req.write(s)
s = req.readline()
return apache.OK
def req_readlines(req):
if 'SizeHint' in req.headers_in:
lines = req.readlines(int(req.headers_in['SizeHint']))
else:
lines = req.readlines()
req.write(b"".join(lines))
return apache.OK
def req_discard_request_body(req):
s = req.read(10)
if s != b'1234567890':
req.log_error('read() #1 returned %s' % repr(s))
req.write('test failed')
return apache.OK
status = req.discard_request_body()
if status != apache.OK:
req.log_error('discard_request_body() returned %d' % status)
return status
s = req.read()
if s:
req.log_error('read() #2 returned %s' % repr(s))
req.write('test failed')
return apache.OK
req.write('test ok')
return apache.OK
def req_register_cleanup(req):
req.cleanup_data = "req_register_cleanup test ok"
req.register_cleanup(cleanup, req)
req.write("registered cleanup that will write to log")
return apache.OK
def cleanup(data):
# for req_register_cleanup above
data.log_error(data.cleanup_data)
def server_cleanup(data):
# for srv_register_cleanup and apache_register_cleanup below
apache.log_error(data)
def req_headers_out(req):
req.headers_out["X-Test-Header"] = "test ok"
req.write("test ok")
return apache.OK
def req_headers_out_access(req):
return apache.OK
def req_sendfile(req):
import tempfile
fname = tempfile.mktemp("txt")
f = open(fname, "w")
f.write(" test ok ");
f.close()
req.sendfile(fname, 2, 7)
# os.remove(fname)
return apache.OK
def req_sendfile2(req):
import tempfile
fname = tempfile.mktemp("txt")
f = open(fname, "w")
f.write("0123456789"*100);
f.close()
req.sendfile(fname)
# os.remove(fname)
return apache.OK
def req_sendfile3(req):
"""Check if sendfile handles symlinks properly.
This is only valid on posix systems.
"""
import tempfile
# note mktemp is deprecated in python 2.3. Should use mkstemp instead.
fname = tempfile.mktemp("txt")
f = open(fname, "w")
f.write("0123456789"*100);
f.close()
fname_symlink = '%s.lnk' % fname
os.symlink(fname, fname_symlink)
req.sendfile(fname_symlink)
os.remove(fname_symlink)
os.remove(fname)
return apache.OK
def req_handler(req):
if req.phase == "PythonFixupHandler":
req.handler = "mod_python"
req.handler = None
req.handler = "mod_python"
req.add_handler("PythonHandler","tests::req_handler")
return apache.OK
elif req.phase == "PythonHandler":
req.write('test ok')
return apache.OK
else:
req.write('test failed')
return apache.OK
def req_no_cache(req):
req.no_cache = 1
req.write('test ok')
return apache.OK
def req_update_mtime(req):
assert(req.mtime == 0.0)
req.update_mtime(100.0)
assert(req.mtime == 100.0)
req.set_etag()
req.set_last_modified()
req.write('test ok')
return apache.OK
def util_redirect(req):
from mod_python import util
if req.main:
# Sub request for ErrorDocument.
req.write("test failed")
return apache.DONE
else:
if req.phase == "PythonFixupHandler":
util.redirect(req,location="/dummy",text="test ok")
else:
req.write('test failed')
return apache.OK
def req_server_get_config(req):
if req.server.get_config().get("PythonDebug", "0") != "1" or \
req.get_config().get("PythonDebug", "0") != "0":
req.write('test failed')
else:
req.write('test ok')
return apache.OK
def req_server_get_options(req):
try:
server_options = apache.main_server.get_options()
assert(server_options.get("global","0") == "0")
assert(server_options.get("override","0") == "0")
server_options = req.connection.base_server.get_options()
assert(server_options.get("global","0") == "0")
assert(server_options.get("override","0") == "0")
server_options = req.server.get_options()
assert(server_options["global"] == "1")
assert(server_options["override"] == "1")
request_options = req.get_options()
assert(request_options["global"] == "1")
assert(request_options["override"] == "2")
assert(request_options["local"] == "1")
except:
req.write('test failed')
else:
req.write('test ok')
return apache.OK
def fileupload(req):
from mod_python import util
fields = util.FieldStorage(req)
f = fields.getfirst('testfile')
if PY2:
import md5
req.write(md5.new(f.file.read()).hexdigest())
else:
from hashlib import md5
req.write(md5(f.file.read()).hexdigest())
return apache.OK
def srv_register_cleanup(req):
req.server.register_cleanup(req, server_cleanup, "srv_register_cleanup test ok")
req.write("registered server cleanup that will write to log")
return apache.OK
def apache_register_cleanup(req):
apache.register_cleanup(server_cleanup, "apache_register_cleanup test ok")
req.write("registered server cleanup that will write to log")
return apache.OK
def apache_exists_config_define(req):
if apache.exists_config_define('FOOBAR'):
req.write('FOOBAR')
else:
req.write('NO_FOOBAR')
return apache.OK
def util_fieldstorage(req):
from mod_python import util
req.write(repr(util.FieldStorage(req).list))
return apache.OK
def postreadrequest(req):
req.log_error('postreadrequest')
req.add_common_vars()
req.subprocess_env['TEST1'] = "'"
req.subprocess_env['TEST2'] = '"'
req.log_error('subprocess_env = %s' % req.subprocess_env)
req.log_error('subprocess_env.values() = %s' % list(req.subprocess_env.values()))
for value in req.subprocess_env.values():
req.log_error('VALUE = %s' % value)
for item in req.subprocess_env.items():
req.log_error('ITEM = %s' % (item,))
req.log_error('SCRIPT_FILENAME = %s' % req.subprocess_env.get('SCRIPT_FILENAME'))
req.log_error('SCRIPT_FILENAME = %s' % req.subprocess_env['SCRIPT_FILENAME'])
req.write("test ok")
return apache.DONE
def trans(req):
req.filename = req.document_root()+"/tests.py"
return apache.OK
def import_test(req):
import sys, os
directory = os.path.dirname(__file__)
assert([os.path.normpath(d) for d in sys.path].count(directory) == 1)
if "dummymodule" in sys.modules:
if "dummymodule::function" not in apache.main_server.get_options():
req.log_error("dummymodule::function not executed")
req.write("test failed")
else:
req.write("test ok")
else:
req.log_error("dummymodule not found in sys.modules")
req.write("test failed")
return apache.OK
def outputfilter(fltr):
assert(not hasattr(fltr, "non_existent"))
s = fltr.read()
while s:
fltr.write(s.upper())
s = fltr.read()
if s is None:
fltr.close()
return apache.OK
def simplehandler(req):
if req.phase != "PythonHandler":
req.write("test failed")
return apache.OK
req.write("test ok")
return apache.OK
def req_add_output_filter(req):
req.add_output_filter("MP_TEST_FILTER")
req.write("test ok")
return apache.OK
def req_register_output_filter(req):
req.register_output_filter("MP_TEST_FILTER","tests::outputfilter")
req.add_output_filter("MP_TEST_FILTER")
req.write("test ok")
return apache.OK
def connectionhandler(conn):
# read whatever
s = conn.readline().strip()
while s:
s = conn.readline().strip()
# fake an HTTP response
conn.write("HTTP/1.1 200 OK\r\n")
conn.write("Content-Length: 7\r\n\r\n")
conn.write("test ok")
return apache.OK
def pipe_ext(req):
# this is called by publisher
return "pipe ext"
def Cookie_Cookie(req):
from mod_python import Cookie
cookies = Cookie.get_cookies(req)
for k in cookies:
Cookie.add_cookie(req, cookies[k])
req.write("test ok")
return apache.OK
def Cookie_MarshalCookie(req):
from mod_python import Cookie
cookies = Cookie.get_cookies(req, Cookie.MarshalCookie,
secret="secret")
for k in cookies:
Cookie.add_cookie(req, cookies[k])
req.write("test ok")
return apache.OK
def global_lock(req):
import _apache
_apache._global_lock(req.server, 1)
time.sleep(1)
_apache._global_unlock(req.server, 1)
req.write("test ok")
return apache.OK
def Session_Session(req):
from mod_python import Session, Cookie
s = Session.Session(req)
if s.is_new():
s.save()
cookies = Cookie.get_cookies(req)
if Session.COOKIE_NAME in cookies and s.is_new():
req.write(str(cookies[Session.COOKIE_NAME]))
else:
req.write("test ok")
return apache.OK
def files_directive(req):
req.write(str(req.hlist.directory))
return apache.OK
none_handler = None
def server_return_1(req):
raise apache.SERVER_RETURN(apache.OK)
def server_return_2(req):
req.write("test ok")
return apache.OK
def phase_status_1(req):
apache.log_error("phase_status_1")
req.phases = [1]
return apache.DECLINED
def phase_status_2(req):
apache.log_error("phase_status_2")
req.phases.append(2)
req.user = "bogus"
req.ap_auth_type = "bogus"
return apache.OK
def phase_status_3(req):
apache.log_error("phase_status_3")
req.phases.append(3)
return apache.OK
def phase_status_4(req):
apache.log_error("phase_status_4")
#req.phases.append(4)
return apache.OK
def phase_status_5(req):
apache.log_error("phase_status_5")
req.phases.append(5)
return apache.DECLINED
def phase_status_6(req):
apache.log_error("phase_status_6")
req.phases.append(6)
return apache.OK
def phase_status_7(req):
apache.log_error("phase_status_7")
req.phases.append(7)
return apache.OK
def phase_status_8(req):
apache.log_error("phase_status_8")
apache.log_error("phases = %s" % req.phases)
if req.phases != [1, 2, 5, 6, 7]:
req.write("test failed")
else:
req.write("test ok")
return apache.OK
def phase_status_cleanup(req):
apache.log_error("phase_status_cleanup_log_entry")
return apache.OK
def test_sys_argv(req):
import sys
req.write(repr(sys.argv))
return apache.OK
def PythonOption_items(req):
options = list(req.get_options().items())
    # The tests may be using PythonOption mod_python.* in the test configuration.
    # We need to remove those particular options so they don't interfere
    # with this test result.
options = [ o for o in options if not o[0].startswith('mod_python') ]
options.sort()
req.write(str(options))
return apache.OK
def interpreter(req):
req.write(req.interpreter)
return apache.DONE
def index(req):
return "test ok, interpreter=%s" % req.interpreter
def test_publisher(req):
return "test ok, interpreter=%s" % req.interpreter
def test_publisher_auth_nested(req):
def __auth__(req, user, password):
test_globals = test_publisher
req.notes["auth_called"] = "1"
return user == "spam" and password == "eggs"
def __access__(req, user):
req.notes["access_called"] = "1"
return 1
assert(int(req.notes.get("auth_called",0)))
assert(int(req.notes.get("access_called",0)))
return "test ok, interpreter=%s" % req.interpreter
class _test_publisher_auth_method_nested:
def method(self, req):
def __auth__(req, user, password):
test_globals = test_publisher
req.notes["auth_called"] = "1"
return user == "spam" and password == "eggs"
def __access__(req, user):
req.notes["access_called"] = "1"
return 1
assert(int(req.notes.get("auth_called",0)))
assert(int(req.notes.get("access_called",0)))
return "test ok, interpreter=%s" % req.interpreter
test_publisher_auth_method_nested = _test_publisher_auth_method_nested()
class OldStyleClassTest:
def __init__(self):
pass
def __call__(self, req):
return "test callable old-style instance ok"
def traverse(self, req):
return "test traversable old-style instance ok"
old_instance = OldStyleClassTest()
test_dict = {1:1, 2:2, 3:3}
test_dict_keys = test_dict.keys
def test_dict_iteration(req):
return test_dict_keys()
def test_generator(req):
c = 0
while c < 10:
yield c
c += 1
def server_side_include(req):
req.ssi_globals = { "data": "test" }
return apache.OK
class InstanceTest(object):
def __call__(self, req):
return "test callable instance ok"
def traverse(self, req):
return "test traversable instance ok"
instance = InstanceTest()
# Hierarchy traversal tests
class Mapping(object):
def __init__(self,name):
self.name = name
def __call__(self,req):
return "Called %s"%self.name
hierarchy_root = Mapping("root")
hierarchy_root.page1 = Mapping("page1")
hierarchy_root.page1.subpage1 = Mapping("subpage1")
hierarchy_root.page2 = Mapping("page2")
class Mapping2:
pass
hierarchy_root_2 = Mapping2()
hierarchy_root_2.__call__ = index
hierarchy_root_2.page1 = index
hierarchy_root_2.page2 = index
def _test_table():
log = apache.log_error
log(" starting _test_table")
d = apache.table()
if list(d.keys()) != []: raise TestFailed('{}.keys()')
if ('a' in d) != 0: raise TestFailed("'a' in {}")
if ('a' not in d) != 1: raise TestFailed("'a' not in {}")
if len(d) != 0: raise TestFailed('len({})')
d = {'a': 1, 'b': 2}
if len(d) != 2: raise TestFailed('len(dict)')
k = list(d.keys())
k.sort()
if k != ['a', 'b']: raise TestFailed('dict keys()')
if 'a' in d and 'b' in d and 'c' not in d: pass
else: raise TestFailed('dict keys()')
if 'a' in d and 'b' in d and 'c' not in d: pass
else: raise TestFailed('dict keys() # in/not in version')
if d['a'] != 1 or d['b'] != 2: raise TestFailed('dict item')
d['c'] = 3
d['a'] = 4
if d['c'] != 3 or d['a'] != 4: raise TestFailed('dict item assignment')
del d['b']
if d != {'a': 4, 'c': 3}: raise TestFailed('dict item deletion')
# dict.clear()
log(" table.clear()")
d = apache.table()
d['1'] = '1'
d['2'] = '2'
d['3'] = '3'
d.clear()
if d != apache.table(): raise TestFailed('dict clear')
# dict.update()
log(" table.update()")
d.update({'1':'100'})
d.update({'2':'20'})
d.update({'1':'1', '2':'2', '3':'3'})
if d != apache.table({'1':'1', '2':'2', '3':'3'}): raise TestFailed('dict update')
d.clear()
try: d.update(None)
except AttributeError: pass
else: raise TestFailed('dict.update(None), AttributeError expected')
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return list(self.d.keys())
def __getitem__(self, i):
return self.d[i]
d.update(SimpleUserDict())
if d != apache.table({1:1, 2:2, 3:3}): raise TestFailed('dict.update(instance)')
d.clear()
class FailingUserDict:
def keys(self):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed('dict.keys() expected ValueError')
class FailingUserDict:
def keys(self):
class BogonIter:
def __iter__(self):
raise ValueError
return BogonIter()
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed('iter(dict.keys()) expected ValueError')
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return 'a'
raise ValueError
def next(self):
return self.__next__()
return BogonIter()
def __getitem__(self, key):
return key
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed('iter(dict.keys()).next() expected ValueError')
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def __next__(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
def next(self):
return self.__next__()
return BogonIter()
def __getitem__(self, key):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed('dict.update(), __getitem__ expected ValueError')
# dict.copy()
log(" table.copy()")
d = {1:1, 2:2, 3:3}
if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed('dict copy')
if apache.table().copy() != apache.table(): raise TestFailed('empty dict copy')
# dict.get()
log(" table.get()")
d = apache.table()
if d.get('c') is not None: raise TestFailed('missing {} get, no 2nd arg')
if d.get('c', '3') != '3': raise TestFailed('missing {} get, w/ 2nd arg')
d = apache.table({'a' : '1', 'b' : '2'})
if d.get('c') is not None: raise TestFailed('missing dict get, no 2nd arg')
if d.get('c', '3') != '3': raise TestFailed('missing dict get, w/ 2nd arg')
if d.get('a') != '1': raise TestFailed('present dict get, no 2nd arg')
if d.get('a', '3') != '1': raise TestFailed('present dict get, w/ 2nd arg')
# dict.setdefault()
log(" table.setdefault()")
d = apache.table()
d.setdefault('key0')
    if d.setdefault('key0') != "":
        raise TestFailed('missing {} setdefault, no 2nd arg')
    if d.setdefault('key0') != "":
        raise TestFailed('present {} setdefault, no 2nd arg')
# dict.popitem()
log(" table.popitem()")
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = apache.table()
b = apache.table()
for i in range(size):
a[repr(i)] = str(i)
if copymode < 0:
b[repr(i)] = str(i)
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
if va != ka: raise TestFailed("a.popitem: %s" % str(ta))
kb, vb = tb = b.popitem()
if vb != kb: raise TestFailed("b.popitem: %s" % str(tb))
if copymode < 0 and ta != tb:
raise TestFailed("a.popitem != b.popitem: %s, %s" % (
str(ta), str(tb)))
if a: raise TestFailed('a not empty after popitems: %s' % str(a))
if b: raise TestFailed('b not empty after popitems: %s' % str(b))
# iteration (just make sure we can iterate without a segfault)
d = apache.table({'a' : '1', 'b' : '2', 'c' : '3'})
log(" for k in table")
for k in d:
pass
log(" _test_table test finished")
def okay(req):
req.write("test ok")
return apache.OK
def memory(req):
# NB: This only works on Linux.
## warm up
for x in range(10000):
req.write("test ok")
## check memory usage before (the unit is pages, usually 4k)
before = list(map(int, open("/proc/self/statm").read().split()))
for x in range(100000):
req.write("test ok")
req.flush()
## check memory usage after
after = list(map(int, open("/proc/self/statm").read().split()))
req.write("|%s|%s" % (before[0], after[0]))
return apache.OK
# Extended prompt.
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""GDB command for working with extended prompts."""
import gdb
import gdb.prompt
class _ExtendedPrompt(gdb.Parameter):
"""Set the extended prompt.
Usage: set extended-prompt VALUE
Substitutions are applied to VALUE to compute the real prompt.
The currently defined substitutions are:
"""
# Add the prompt library's dynamically generated help to the
# __doc__ string.
__doc__ = __doc__ + gdb.prompt.prompt_help()
set_doc = "Set the extended prompt."
show_doc = "Show the extended prompt."
def __init__(self):
super(_ExtendedPrompt, self).__init__("extended-prompt",
gdb.COMMAND_SUPPORT,
gdb.PARAM_STRING_NOESCAPE)
self.value = ''
self.hook_set = False
def get_show_string (self, pvalue):
        if self.value != '':
return "The extended prompt is: " + self.value
else:
return "The extended prompt is not set."
def get_set_string (self):
        if not self.hook_set:
gdb.prompt_hook = self.before_prompt_hook
self.hook_set = True
return ""
def before_prompt_hook(self, current):
        if self.value != '':
newprompt = gdb.prompt.substitute_prompt(self.value)
return newprompt.replace('\\', '\\\\')
else:
return None
_ExtendedPrompt()
import pyqtgraph as pg
pg.mkQApp()
def test_combobox():
cb = pg.ComboBox()
items = {'a': 1, 'b': 2, 'c': 3}
cb.setItems(items)
cb.setValue(2)
assert str(cb.currentText()) == 'b'
assert cb.value() == 2
# Clear item list; value should be None
cb.clear()
    assert cb.value() is None
# Reset item list; value should be set automatically
cb.setItems(items)
assert cb.value() == 2
# Clear item list; repopulate with same names and new values
items = {'a': 4, 'b': 5, 'c': 6}
cb.clear()
cb.setItems(items)
assert cb.value() == 5
# Set list instead of dict
cb.setItems(list(items.keys()))
assert str(cb.currentText()) == 'b'
cb.setValue('c')
assert cb.value() == str(cb.currentText())
assert cb.value() == 'c'
cb.setItemValue('c', 7)
assert cb.value() == 7
if __name__ == '__main__':
cb = pg.ComboBox()
cb.show()
cb.setItems({'': None, 'a': 1, 'b': 2, 'c': 3})
def fn(ind):
print("New value: %s" % cb.value())
cb.currentIndexChanged.connect(fn)
import unittest
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.content import ContentStore
from xmodule.modulestore import Location
class Content:
def __init__(self, location, content_type):
self.location = location
self.content_type = content_type
class ContentTest(unittest.TestCase):
def test_thumbnail_none(self):
# We had a bug where a thumbnail location of None was getting transformed into a Location tuple, with
# all elements being None. It is important that the location be just None for rendering.
content = StaticContent('loc', 'name', 'content_type', 'data', None, None, None)
self.assertIsNone(content.thumbnail_location)
content = StaticContent('loc', 'name', 'content_type', 'data')
self.assertIsNone(content.thumbnail_location)
def test_static_url_generation_from_courseid(self):
url = StaticContent.convert_legacy_static_url_with_course_id('images_course_image.jpg', 'foo/bar/bz')
self.assertEqual(url, '/c4x/foo/bar/asset/images_course_image.jpg')
def test_generate_thumbnail_image(self):
contentStore = ContentStore()
content = Content(Location(u'c4x', u'mitX', u'800', u'asset', u'monsters__.jpg'), None)
(thumbnail_content, thumbnail_file_location) = contentStore.generate_thumbnail(content)
self.assertIsNone(thumbnail_content)
self.assertEqual(Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters__.jpg'), thumbnail_file_location)
def test_compute_location(self):
        # We had a bug where __ got converted into a single _. Make sure that substitution of INVALID_CHARS (like space)
        # still happens.
asset_location = StaticContent.compute_location('mitX', '400', 'subs__1eo_jXvZnE .srt.sjson')
self.assertEqual(Location(u'c4x', u'mitX', u'400', u'asset', u'subs__1eo_jXvZnE_.srt.sjson', None), asset_location)
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol version HyBi 00.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python documentation says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import logging
import re
import struct
from mod_pywebsocket import common
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket import util
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_default_port
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import parse_host_header
from mod_pywebsocket.handshake._base import validate_mandatory_header
_MANDATORY_HEADERS = [
# key, expected value or None
[common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
[common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
]
def _validate_subprotocol(subprotocol):
"""Checks if characters in subprotocol are in range between U+0020 and
    U+007E. A value in the Sec-WebSocket-Protocol field needs to satisfy this
    requirement.
    See Section 4.1 (Opening handshake) of the spec.
"""
if not subprotocol:
raise HandshakeException('Invalid subprotocol name: empty')
# Parameter should be in the range U+0020 to U+007E.
for c in subprotocol:
if not 0x20 <= ord(c) <= 0x7e:
raise HandshakeException(
'Illegal character in subprotocol name: %r' % c)
def _check_header_lines(request, mandatory_headers):
check_request_line(request)
# The expected field names, and the meaning of their corresponding
# values, are as follows.
# |Upgrade| and |Connection|
for key, expected_value in mandatory_headers:
validate_mandatory_header(request, key, expected_value)
def _build_location(request):
"""Build WebSocket location for request."""
location_parts = []
if request.is_https():
location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
else:
location_parts.append(common.WEB_SOCKET_SCHEME)
location_parts.append('://')
host, port = parse_host_header(request)
connection_port = request.connection.local_addr[1]
if port != connection_port:
raise HandshakeException('Header/connection port mismatch: %d/%d' %
(port, connection_port))
location_parts.append(host)
if (port != get_default_port(request.is_https())):
location_parts.append(':')
location_parts.append(str(port))
location_parts.append(request.unparsed_uri)
return ''.join(location_parts)
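# Worked example (editor's sketch, not part of mod_pywebsocket): the logic of
# _build_location() above, restated without a mod_python request object. For a
# non-secure request with Host "example.com:8080" and unparsed_uri "/chat" it
# yields "ws://example.com:8080/chat"; the port is appended only when it differs
# from the scheme's default (80 for ws, 443 for wss). Names and values here are
# illustrative only.
def _build_location_sketch(is_https, host, port, unparsed_uri):
    scheme = 'wss' if is_https else 'ws'
    default_port = 443 if is_https else 80
    netloc = host if port == default_port else '%s:%d' % (host, port)
    return '%s://%s%s' % (scheme, netloc, unparsed_uri)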
class Handshaker(object):
"""Opening handshake processor for the WebSocket protocol version HyBi 00.
"""
def __init__(self, request, dispatcher):
"""Construct an instance.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
Handshaker will add attributes such as ws_resource in performing
handshake.
"""
self._logger = util.get_class_logger(self)
self._request = request
self._dispatcher = dispatcher
def do_handshake(self):
"""Perform WebSocket Handshake.
On _request, we set
ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
ws_challenge_md5: WebSocket handshake information.
ws_stream: Frame generation/parsing class.
ws_version: Protocol version.
Raises:
HandshakeException: when any error happened in parsing the opening
handshake request.
"""
# 5.1 Reading the client's opening handshake.
# dispatcher sets it in self._request.
_check_header_lines(self._request, _MANDATORY_HEADERS)
self._set_resource()
self._set_subprotocol()
self._set_location()
self._set_origin()
self._set_challenge_response()
self._set_protocol_version()
self._dispatcher.do_extra_handshake(self._request)
self._send_handshake()
def _set_resource(self):
self._request.ws_resource = self._request.uri
def _set_subprotocol(self):
# |Sec-WebSocket-Protocol|
subprotocol = self._request.headers_in.get(
common.SEC_WEBSOCKET_PROTOCOL_HEADER)
if subprotocol is not None:
_validate_subprotocol(subprotocol)
self._request.ws_protocol = subprotocol
def _set_location(self):
# |Host|
host = self._request.headers_in.get(common.HOST_HEADER)
if host is not None:
self._request.ws_location = _build_location(self._request)
# TODO(ukai): check host is this host.
def _set_origin(self):
# |Origin|
origin = self._request.headers_in.get(common.ORIGIN_HEADER)
if origin is not None:
self._request.ws_origin = origin
def _set_protocol_version(self):
# |Sec-WebSocket-Draft|
draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
if draft is not None and draft != '0':
raise HandshakeException('Illegal value for %s: %s' %
(common.SEC_WEBSOCKET_DRAFT_HEADER,
draft))
self._logger.debug('Protocol version is HyBi 00')
self._request.ws_version = common.VERSION_HYBI00
self._request.ws_stream = StreamHixie75(self._request, True)
def _set_challenge_response(self):
# 5.2 4-8.
self._request.ws_challenge = self._get_challenge()
        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
self._request.ws_challenge_md5 = util.md5_hash(
self._request.ws_challenge).digest()
self._logger.debug(
'Challenge: %r (%s)',
self._request.ws_challenge,
util.hexify(self._request.ws_challenge))
self._logger.debug(
'Challenge response: %r (%s)',
self._request.ws_challenge_md5,
util.hexify(self._request.ws_challenge_md5))
def _get_key_value(self, key_field):
key_value = get_mandatory_header(self._request, key_field)
self._logger.debug('%s: %r', key_field, key_value)
# 5.2 4. let /key-number_n/ be the digits (characters in the range
# U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
# interpreted as a base ten integer, ignoring all other characters
# in /key_n/.
try:
            key_number = int(re.sub(r"\D", "", key_value))
        except ValueError:
            raise HandshakeException('%s field contains no digit' % key_field)
# 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
# in /key_n/.
spaces = re.subn(" ", "", key_value)[1]
if spaces == 0:
raise HandshakeException('%s field contains no space' % key_field)
self._logger.debug(
'%s: Key-number is %d and number of spaces is %d',
key_field, key_number, spaces)
# 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
# then abort the WebSocket connection.
if key_number % spaces != 0:
raise HandshakeException(
'%s: Key-number (%d) is not an integral multiple of spaces '
'(%d)' % (key_field, key_number, spaces))
# 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
        part = key_number // spaces
self._logger.debug('%s: Part is %d', key_field, part)
return part
def _get_challenge(self):
# 5.2 4-7.
key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
# 5.2 8. let /challenge/ be the concatenation of /part_1/,
challenge = ''
challenge += struct.pack('!I', key1) # network byteorder int
challenge += struct.pack('!I', key2) # network byteorder int
challenge += self._request.connection.read(8)
return challenge
def _send_handshake(self):
response = []
# 5.2 10. send the following line.
response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
# 5.2 11. send the following fields to the client.
response.append(format_header(
common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
response.append(format_header(
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
response.append(format_header(
common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
response.append(format_header(
common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
if self._request.ws_protocol:
response.append(format_header(
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
# 5.2 12. send two bytes 0x0D 0x0A.
response.append('\r\n')
# 5.2 13. send /response/
response.append(self._request.ws_challenge_md5)
raw_response = ''.join(response)
self._request.connection.write(raw_response)
self._logger.debug('Sent server\'s opening handshake: %r',
raw_response)
# vi:sts=4 sw=4 et
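# --- Editor's sketch (not part of mod_pywebsocket) ---------------------------
# The HyBi 00 challenge handling implemented by _get_key_value()/_get_challenge()
# above boils down to: keep only the digits of each key header to get a key
# number, count its spaces, require the key number to be a multiple of the space
# count, and use key_number // spaces as a big-endian 32-bit "part". The response
# is the MD5 digest of part1 + part2 + the 8 raw bytes following the headers.
# The keys below are made up for illustration; this helper is never called by
# the Handshaker class.
def _hybi00_challenge_sketch():
    import hashlib
    import re as _re
    import struct as _struct

    def part(key_value):
        key_number = int(_re.sub(r"\D", "", key_value))
        spaces = key_value.count(" ")
        assert spaces and key_number % spaces == 0
        return key_number // spaces

    key1 = "12 3 4"     # digits "1234", 2 spaces -> part 617
    key2 = "50 0 0"     # digits "5000", 2 spaces -> part 2500
    key3 = b"12345678"  # the 8 raw bytes read from the connection
    challenge = _struct.pack("!I", part(key1)) + _struct.pack("!I", part(key2)) + key3
    return hashlib.md5(challenge).digest()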
import re
from io import BytesIO
from time import sleep
from livestreamer.exceptions import PluginError
from livestreamer.packages.flashmedia import AMFPacket, AMFMessage
from livestreamer.packages.flashmedia.types import AMF3ObjectBase
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import AkamaiHDStream
AMF_GATEWAY = "http://c.brightcove.com/services/messagebroker/amf"
AMF_MESSAGE_PREFIX = "af6b88c640c8d7b4cc75d22f7082ad95603bc627"
STREAM_NAMES = ["360p", "480p", "720p", "source"]
HTTP_HEADERS = {
"User-Agent": ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36")
}
_url_re = re.compile(r"http(s)?://(\w+\.)?azubu\.tv/(?P<domain>\w+)")
CHANNEL_INFO_URL = "http://api.azubu.tv/public/channel/%s/player"
_viewerexp_schema = validate.Schema(
validate.attr({
"programmedContent": {
"videoPlayer": validate.attr({
"mediaDTO": validate.attr({
"renditions": {
int: validate.attr({
"encodingRate": int,
"defaultURL": validate.text
})
}
})
})
}
})
)
@AMF3ObjectBase.register("com.brightcove.experience.ViewerExperienceRequest")
class ViewerExperienceRequest(AMF3ObjectBase):
__members__ = ["contentOverrides",
"experienceId",
"URL",
"playerKey",
"deliveryType",
"TTLToken"]
def __init__(self, URL, contentOverrides, experienceId, playerKey, TTLToken=""):
self.URL = URL
self.deliveryType = float("nan")
self.contentOverrides = contentOverrides
self.experienceId = experienceId
self.playerKey = playerKey
self.TTLToken = TTLToken
@AMF3ObjectBase.register("com.brightcove.experience.ContentOverride")
class ContentOverride(AMF3ObjectBase):
__members__ = ["featuredRefId",
"contentRefIds",
"contentId",
"contentType",
"contentIds",
"featuredId",
"contentRefId",
"target"]
def __init__(self, contentId=float("nan"), contentRefId=None, contentType=0,
target="videoPlayer"):
self.contentType = contentType
self.contentId = contentId
self.target = target
self.contentIds = None
self.contentRefId = contentRefId
self.contentRefIds = None
self.contentType = 0
self.featuredId = float("nan")
self.featuredRefId = None
class AzubuTV(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, stream):
if stream == "source":
weight = 1080
else:
weight, group = Plugin.stream_weight(stream)
return weight, "azubutv"
def _create_amf_request(self, key, video_player, player_id):
if video_player.startswith("ref:"):
content_override = ContentOverride(contentRefId=video_player[4:])
else:
content_override = ContentOverride(contentId=int(video_player))
viewer_exp_req = ViewerExperienceRequest(self.url,
[content_override],
int(player_id), key)
req = AMFPacket(version=3)
req.messages.append(AMFMessage(
"com.brightcove.experience.ExperienceRuntimeFacade.getDataForExperience",
"/1",
[AMF_MESSAGE_PREFIX, viewer_exp_req]
))
return req
def _send_amf_request(self, req, key):
headers = {
"content-type": "application/x-amf"
}
res = http.post(AMF_GATEWAY, data=bytes(req.serialize()),
headers=headers, params=dict(playerKey=key))
return AMFPacket.deserialize(BytesIO(res.content))
def _get_player_params(self, retries=5):
        match = _url_re.match(self.url)
        domain = match.group('domain')
try:
res = http.get(CHANNEL_INFO_URL % str(domain))
except PluginError as err:
# The server sometimes gives us 404 for no reason
if "404" in str(err) and retries:
sleep(1)
return self._get_player_params(retries - 1)
else:
raise
channel_info = http.json(res)
channel_info = channel_info['data']
        key = channel_info['player_key']
        is_live = channel_info['is_live']
stream_video = channel_info['stream_video']
if stream_video:
video_player = "ref:" + stream_video['reference_id']
else:
is_live = False
video_player = None
player_id = channel_info['player_id']
return key, video_player, player_id, is_live
def _parse_result(self, res):
res = _viewerexp_schema.validate(res)
player = res.programmedContent["videoPlayer"]
renditions = sorted(player.mediaDTO.renditions.values(),
key=lambda r: r.encodingRate or 100000000)
streams = {}
for stream_name, rendition in zip(STREAM_NAMES, renditions):
stream = AkamaiHDStream(self.session, rendition.defaultURL)
streams[stream_name] = stream
return streams
def _get_streams(self):
key, video_player, player_id, is_live = self._get_player_params()
if not is_live:
return
req = self._create_amf_request(key, video_player, player_id)
res = self._send_amf_request(req, key)
streams = {}
for message in res.messages:
if message.target_uri == "/1/onResult":
streams = self._parse_result(message.value)
return streams
__plugin__ = AzubuTV
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests, json
from flexget.utils.search import torrent_availability
session = requests.Session()
log = logging.getLogger('search_btn')
# TODO: btn has a limit of 150 searches per hour
class SearchBTN(object):
schema = {'type': 'string'}
def search(self, entry, config):
api_key = config
searches = entry.get('search_strings', [entry['title']])
if 'series_name' in entry:
search = {'series': entry['series_name']}
if 'series_id' in entry:
# BTN wants an ep style identifier even for sequence shows
if entry.get('series_id_type') == 'sequence':
search['name'] = 'S01E%02d' % entry['series_id']
else:
search['name'] = entry['series_id']
searches = [search]
results = set()
for search in searches:
data = json.dumps({'method': 'getTorrents', 'params': [api_key, search], 'id': 1})
try:
r = session.post('http://api.btnapps.net/', data=data, headers={'Content-type': 'application/json'})
except requests.RequestException as e:
log.error('Error searching btn: %s' % e)
continue
content = r.json()
if not content or not content['result']:
log.debug('No results from btn')
continue
if 'torrents' in content['result']:
for item in content['result']['torrents'].itervalues():
if item['Category'] != 'Episode':
continue
entry = Entry()
entry['title'] = item['ReleaseName']
entry['title'] += ' '.join(['', item['Resolution'], item['Source'], item['Codec']])
entry['url'] = item['DownloadURL']
entry['torrent_seeds'] = int(item['Seeders'])
entry['torrent_leeches'] = int(item['Leechers'])
entry['torrent_info_hash'] = item['InfoHash']
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
if item['TvdbID']:
entry['tvdb_id'] = int(item['TvdbID'])
results.add(entry)
return results
@event('plugin.register')
def register_plugin():
plugin.register(SearchBTN, 'btn', groups=['search'], api_ver=2)
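# Payload sketch (editor's addition, illustrative only): for an entry with
# series_name "Example Show" and a sequence-style series_id of 7, the search()
# method above posts a JSON-RPC body shaped like the one built below to
# http://api.btnapps.net/ with Content-type application/json. The api key and
# titles here are made up.
def _btn_payload_sketch():
    search = {'series': 'Example Show', 'name': 'S01E07'}
    return json.dumps({'method': 'getTorrents', 'params': ['MY_API_KEY', search], 'id': 1})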
# Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.SimObject import SimObject
# ACPI description table header. Subclasses contain and handle the actual
# contents as appropriate for that type of table.
class X86ACPISysDescTable(SimObject):
type = 'X86ACPISysDescTable'
cxx_class = 'X86ISA::ACPI::SysDescTable'
cxx_header = 'arch/x86/bios/acpi.hh'
abstract = True
oem_id = Param.String('', 'string identifying the oem')
oem_table_id = Param.String('', 'oem table ID')
oem_revision = Param.UInt32(0, 'oem revision number for the table')
creator_id = Param.String('',
'string identifying the generator of the table')
creator_revision = Param.UInt32(0,
'revision number for the creator of the table')
class X86ACPIRSDT(X86ACPISysDescTable):
type = 'X86ACPIRSDT'
cxx_class = 'X86ISA::ACPI::RSDT'
cxx_header = 'arch/x86/bios/acpi.hh'
entries = VectorParam.X86ACPISysDescTable([], 'system description tables')
class X86ACPIXSDT(X86ACPISysDescTable):
type = 'X86ACPIXSDT'
cxx_class = 'X86ISA::ACPI::XSDT'
cxx_header = 'arch/x86/bios/acpi.hh'
entries = VectorParam.X86ACPISysDescTable([], 'system description tables')
# Root System Description Pointer Structure
class X86ACPIRSDP(SimObject):
type = 'X86ACPIRSDP'
cxx_class = 'X86ISA::ACPI::RSDP'
cxx_header = 'arch/x86/bios/acpi.hh'
oem_id = Param.String('', 'string identifying the oem')
    # 0 encodes ACPI 1.0 and 2 encodes ACPI 3.0, which is the version
    # implemented here.
revision = Param.UInt8(2, 'revision of ACPI being used, zero indexed')
rsdt = Param.X86ACPIRSDT(NULL, 'root system description table')
xsdt = Param.X86ACPIXSDT(X86ACPIXSDT(),
'extended system description table')
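# Wiring sketch (editor's addition, not part of gem5): in a configuration script
# these tables are typically chained together RSDP -> RSDT/XSDT -> entries. The
# oem_id values below are made up.
def _acpi_tables_sketch():
    rsdt = X86ACPIRSDT(oem_id='GEM5', entries=[])
    xsdt = X86ACPIXSDT(oem_id='GEM5', entries=[])
    return X86ACPIRSDP(oem_id='GEM5', rsdt=rsdt, xsdt=xsdt)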
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import copy
from whoosh import query
from whoosh.compat import iteritems, u, PY3
from whoosh.qparser import syntax
from whoosh.qparser.common import attach
from whoosh.qparser.taggers import RegexTagger, FnTagger
from whoosh.util import rcompile
class Plugin(object):
"""Base class for parser plugins.
"""
def taggers(self, parser):
"""Should return a list of ``(Tagger, priority)`` tuples to add to the
syntax the parser understands. Lower priorities run first.
"""
return ()
def filters(self, parser):
"""Should return a list of ``(filter_function, priority)`` tuples to
add to parser.
Filter functions will be called with ``(parser, groupnode)`` and should
return a group node.
"""
return ()
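# Minimal example plugin (editor's sketch, illustrative only): it shows the
# taggers()/filters() protocol described in the Plugin docstring above. The
# tagger marks "!" characters with a hypothetical Exclamation marker node at
# priority 0, and the filter strips those markers again at priority 400. The
# class names are made up and nothing in this module uses them.
class _ExclamationPluginSketch(Plugin):
    class Exclamation(syntax.MarkerNode):
        pass

    def taggers(self, parser):
        # Lower priorities run first
        return [(FnTagger("!", self.Exclamation), 0)]

    def filters(self, parser):
        return [(self.remove_exclamations, 400)]

    def remove_exclamations(self, parser, group):
        newgroup = group.empty_copy()
        for node in group:
            if isinstance(node, syntax.GroupNode):
                newgroup.append(self.remove_exclamations(parser, node))
            elif not isinstance(node, self.Exclamation):
                newgroup.append(node)
        return newgroup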
class TaggingPlugin(RegexTagger):
"""A plugin that also acts as a Tagger, to avoid having an extra Tagger
class for simple cases.
A TaggingPlugin object should have a ``priority`` attribute and either a
``nodetype`` attribute or a ``create()`` method. If the subclass doesn't
override ``create()``, the base class will call ``self.nodetype`` with the
Match object's named groups as keyword arguments.
"""
priority = 0
def __init__(self, expr=None):
self.expr = rcompile(expr or self.expr)
def taggers(self, parser):
return [(self, self.priority)]
def filters(self, parser):
return ()
def create(self, parser, match):
# Groupdict keys can be unicode sometimes apparently? Convert them to
# str for use as keyword arguments. This should be Py3-safe.
kwargs = dict((str(k), v) for k, v in iteritems(match.groupdict()))
return self.nodetype(**kwargs)
class WhitespacePlugin(TaggingPlugin):
"""Tags whitespace and removes it at priority 500. Depending on whether
your plugin's filter wants to see where whitespace was in the original
query, it should run with priority lower than 500 (before removal of
whitespace) or higher than 500 (after removal of whitespace).
"""
nodetype = syntax.Whitespace
priority = 100
def __init__(self, expr=r"\s+"):
TaggingPlugin.__init__(self, expr)
def filters(self, parser):
return [(self.remove_whitespace, 500)]
def remove_whitespace(self, parser, group):
newgroup = group.empty_copy()
for node in group:
if isinstance(node, syntax.GroupNode):
newgroup.append(self.remove_whitespace(parser, node))
elif not node.is_ws():
newgroup.append(node)
return newgroup
class SingleQuotePlugin(TaggingPlugin):
"""Adds the ability to specify single "terms" containing spaces by
enclosing them in single quotes.
"""
expr = r"(^|(?<=\W))'(?P.*?)'(?=\s|\]|[)}]|$)"
nodetype = syntax.WordNode
class PrefixPlugin(TaggingPlugin):
"""Adds the ability to specify prefix queries by ending a term with an
asterisk.
This plugin is useful if you want the user to be able to create prefix but
not wildcard queries (for performance reasons). If you are including the
wildcard plugin, you should not include this plugin as well.
>>> qp = qparser.QueryParser("content", myschema)
>>> qp.remove_plugin_class(qparser.WildcardPlugin)
>>> qp.add_plugin(qparser.PrefixPlugin())
>>> q = qp.parse("pre*")
"""
class PrefixNode(syntax.TextNode):
qclass = query.Prefix
def r(self):
return "%r*" % self.text
expr = "(?P[^ \t\r\n*]+)[*](?= |$|\\))"
nodetype = PrefixNode
class WildcardPlugin(TaggingPlugin):
class WildcardNode(syntax.TextNode):
# Note that this node inherits tokenize = False from TextNode,
# so the text in this node will not be analyzed... just passed
# straight to the query
# TODO: instead of parsing a "wildcard word", create marker nodes for
# individual ? and * characters. This will have to wait for a more
# advanced wikiparser-like parser.
qclass = query.Wildcard
def r(self):
return "Wild %r" % self.text
# Any number of word chars, followed by at least one question mark or
# star, followed by any number of word chars, question marks, or stars
# \u055E = Armenian question mark
# \u061F = Arabic question mark
# \u1367 = Ethiopic question mark
qms = u("\u055E\u061F\u1367")
expr = u("(?P(\\w|[-])*[*?%s](\\w|[-*?%s])*)") % (qms, qms)
nodetype = WildcardNode
class RegexPlugin(TaggingPlugin):
"""Adds the ability to specify regular expression term queries.
The default syntax for a regular expression term is ``r"termexpr"``.
>>> qp = qparser.QueryParser("content", myschema)
>>> qp.add_plugin(qparser.RegexPlugin())
>>> q = qp.parse('foo title:r"bar+"')
"""
class RegexNode(syntax.TextNode):
qclass = query.Regex
def r(self):
return "Regex %r" % self.text
expr = 'r"(?P[^"]*)"'
nodetype = RegexNode
class BoostPlugin(TaggingPlugin):
"""Adds the ability to boost clauses of the query using the circumflex.
>>> qp = qparser.QueryParser("content", myschema)
>>> q = qp.parse("hello there^2")
"""
expr = "\\^(?P[0-9]*(\\.[0-9]+)?)($|(?=[ \t\r\n)]))"
class BoostNode(syntax.SyntaxNode):
def __init__(self, original, boost):
self.original = original
self.boost = boost
def r(self):
return "^ %s" % self.boost
def create(self, parser, match):
# Override create so we can grab group 0
original = match.group(0)
try:
boost = float(match.group("boost"))
except ValueError:
# The text after the ^ wasn't a valid number, so turn it into a
# word
node = syntax.WordNode(original)
else:
node = self.BoostNode(original, boost)
return node
def filters(self, parser):
return [(self.clean_boost, 0), (self.do_boost, 700)]
def clean_boost(self, parser, group):
"""This filter finds any BoostNodes in positions where they can't boost
the previous node (e.g. at the very beginning, after whitespace, or
after another BoostNode) and turns them into WordNodes.
"""
bnode = self.BoostNode
for i, node in enumerate(group):
if isinstance(node, bnode):
if (not i or not group[i - 1].has_boost):
group[i] = syntax.to_word(node)
return group
def do_boost(self, parser, group):
"""This filter finds BoostNodes and applies the boost to the previous
node.
"""
newgroup = group.empty_copy()
for node in group:
if isinstance(node, syntax.GroupNode):
node = self.do_boost(parser, node)
elif isinstance(node, self.BoostNode):
if (newgroup and newgroup[-1].has_boost):
# Apply the BoostNode's boost to the previous node
newgroup[-1].set_boost(node.boost)
# Skip adding the BoostNode to the new group
continue
else:
node = syntax.to_word(node)
newgroup.append(node)
return newgroup
class GroupPlugin(Plugin):
"""Adds the ability to group clauses using parentheses.
"""
# Marker nodes for open and close bracket
class OpenBracket(syntax.SyntaxNode):
def r(self):
return "("
class CloseBracket(syntax.SyntaxNode):
def r(self):
return ")"
def __init__(self, openexpr="\\(", closeexpr="\\)"):
self.openexpr = openexpr
self.closeexpr = closeexpr
def taggers(self, parser):
return [(FnTagger(self.openexpr, self.OpenBracket), 0),
(FnTagger(self.closeexpr, self.CloseBracket), 0)]
def filters(self, parser):
return [(self.do_groups, 0)]
def do_groups(self, parser, group):
"""This filter finds open and close bracket markers in a flat group
and uses them to organize the nodes into a hierarchy.
"""
ob, cb = self.OpenBracket, self.CloseBracket
# Group hierarchy stack
stack = [parser.group()]
for node in group:
if isinstance(node, ob):
# Open bracket: push a new level of hierarchy on the stack
stack.append(parser.group())
elif isinstance(node, cb):
# Close bracket: pop the current level of hierarchy and append
# it to the previous level
if len(stack) > 1:
last = stack.pop()
stack[-1].append(last)
else:
# Anything else: add it to the current level of hierarchy
stack[-1].append(node)
top = stack[0]
# If the parens were unbalanced (more opens than closes), just take
# whatever levels of hierarchy were left on the stack and tack them on
# the end of the top-level
if len(stack) > 1:
for ls in stack[1:]:
top.extend(ls)
if len(top) == 1 and isinstance(top[0], syntax.GroupNode):
boost = top.boost
top = top[0]
top.boost = boost
return top
class EveryPlugin(TaggingPlugin):
expr = "[*]:[*]"
priority = -1
def create(self, parser, match):
return self.EveryNode()
class EveryNode(syntax.SyntaxNode):
def r(self):
return "*:*"
def query(self, parser):
return query.Every()
class FieldsPlugin(TaggingPlugin):
"""Adds the ability to specify the field of a clause.
"""
class FieldnameTagger(RegexTagger):
def create(self, parser, match):
return syntax.FieldnameNode(match.group("text"), match.group(0))
def __init__(self, expr=r"(?P\w+|[*]):", remove_unknown=True):
"""
:param expr: the regular expression to use for tagging fields.
:param remove_unknown: if True, converts field specifications for
fields that aren't in the schema into regular text.
"""
self.expr = expr
self.removeunknown = remove_unknown
def taggers(self, parser):
return [(self.FieldnameTagger(self.expr), 0)]
def filters(self, parser):
return [(self.do_fieldnames, 100)]
def do_fieldnames(self, parser, group):
"""This filter finds FieldnameNodes in the tree and applies their
fieldname to the next node.
"""
fnclass = syntax.FieldnameNode
if self.removeunknown and parser.schema:
# Look for field nodes that aren't in the schema and convert them
# to text
schema = parser.schema
newgroup = group.empty_copy()
prev_field_node = None
for node in group:
if isinstance(node, fnclass) and node.fieldname not in schema:
prev_field_node = node
continue
elif prev_field_node:
# If prev_field_node is not None, it contains a field node
# that appeared before this node but isn't in the schema,
# so we'll convert it to text here
if node.has_text:
node.text = prev_field_node.original + node.text
else:
newgroup.append(syntax.to_word(prev_field_node))
prev_field_node = None
newgroup.append(node)
if prev_field_node:
newgroup.append(syntax.to_word(prev_field_node))
group = newgroup
newgroup = group.empty_copy()
# Iterate backwards through the stream, looking for field-able objects
# with field nodes in front of them
i = len(group)
while i > 0:
i -= 1
node = group[i]
if isinstance(node, fnclass):
# If we see a fieldname node, it must not have been in front
# of something fieldable, since we would have already removed
# it (since we're iterating backwards), so convert it to text
node = syntax.to_word(node)
elif isinstance(node, syntax.GroupNode):
node = self.do_fieldnames(parser, node)
if i > 0 and not node.is_ws() and isinstance(group[i - 1],
fnclass):
node.set_fieldname(group[i - 1].fieldname, override=False)
i -= 1
newgroup.append(node)
newgroup.reverse()
return newgroup
class PhrasePlugin(Plugin):
"""Adds the ability to specify phrase queries inside double quotes.
"""
# Didn't use TaggingPlugin because I need to add slop parsing at some
# point
# Expression used to find words if a schema isn't available
wordexpr = rcompile(r'\S+')
class PhraseNode(syntax.TextNode):
def __init__(self, text, textstartchar, slop=1):
syntax.TextNode.__init__(self, text)
self.textstartchar = textstartchar
self.slop = slop
def r(self):
return "%s %r~%s" % (self.__class__.__name__, self.text, self.slop)
def apply(self, fn):
return self.__class__(self.type, [fn(node) for node in self.nodes],
slop=self.slop, boost=self.boost)
def query(self, parser):
text = self.text
fieldname = self.fieldname or parser.fieldname
# We want to process the text of the phrase into "words" (tokens),
# and also record the startchar and endchar of each word
sc = self.textstartchar
if parser.schema and fieldname in parser.schema:
field = parser.schema[fieldname]
if field.analyzer:
# We have a field with an analyzer, so use it to parse
# the phrase into tokens
tokens = field.tokenize(text, mode="query", chars=True)
words = []
char_ranges = []
for t in tokens:
words.append(t.text)
char_ranges.append((sc + t.startchar, sc + t.endchar))
else:
# We have a field but it doesn't have a format object,
# for some reason (it's self-parsing?), so use process_text
# to get the texts (we won't know the start/end chars)
words = list(field.process_text(text, mode="query"))
char_ranges = [(None, None)] * len(words)
else:
# We're parsing without a schema, so just use the default
# regular expression to break the text into words
words = []
char_ranges = []
for match in PhrasePlugin.wordexpr.finditer(text):
words.append(match.group(0))
char_ranges.append((sc + match.start(), sc + match.end()))
qclass = parser.phraseclass
q = qclass(fieldname, words, slop=self.slop, boost=self.boost,
char_ranges=char_ranges)
return attach(q, self)
class PhraseTagger(RegexTagger):
def create(self, parser, match):
return PhrasePlugin.PhraseNode(match.group("text"),
match.start("text"))
def __init__(self, expr='"(?P.*?)"'):
self.expr = expr
def taggers(self, parser):
return [(self.PhraseTagger(self.expr), 0)]
class RangePlugin(Plugin):
"""Adds the ability to specify term ranges.
"""
    expr = rcompile(r"""
    (?P<open>\{|\[)               # Open paren
    (?P<start>
        ('[^']*?'\s+)             # single-quoted
        |                         # or
        (.+?(?=[Tt][Oo]))         # everything until "to"
    )?
    [Tt][Oo]                      # "to"
    (?P<end>
        (\s+'[^']*?')             # single-quoted
        |                         # or
        ((.+?)(?=\]|\}))          # everything until "]" or "}"
    )?
    (?P<close>\}|\])              # Close paren
    """, verbose=True)
class RangeTagger(RegexTagger):
def __init__(self, expr, excl_start, excl_end):
self.expr = expr
self.excl_start = excl_start
self.excl_end = excl_end
def create(self, parser, match):
start = match.group("start")
end = match.group("end")
if start:
# Strip the space before the "to"
start = start.rstrip()
# Strip single quotes
if start.startswith("'") and start.endswith("'"):
start = start[1:-1]
if end:
# Strip the space before the "to"
end = end.lstrip()
# Strip single quotes
if end.startswith("'") and end.endswith("'"):
end = end[1:-1]
# What kind of open and close brackets were used?
startexcl = match.group("open") == self.excl_start
endexcl = match.group("close") == self.excl_end
rn = syntax.RangeNode(start, end, startexcl, endexcl)
return rn
def __init__(self, expr=None, excl_start="{", excl_end="}"):
self.expr = expr or self.expr
self.excl_start = excl_start
self.excl_end = excl_end
def taggers(self, parser):
tagger = self.RangeTagger(self.expr, self.excl_start, self.excl_end)
return [(tagger, 1)]
class OperatorsPlugin(Plugin):
"""By default, adds the AND, OR, ANDNOT, ANDMAYBE, and NOT operators to
the parser syntax. This plugin scans the token stream for subclasses of
:class:`Operator` and calls their :meth:`Operator.make_group` methods
to allow them to manipulate the stream.
There are two levels of configuration available.
The first level is to change the regular expressions of the default
operators, using the ``And``, ``Or``, ``AndNot``, ``AndMaybe``, and/or
``Not`` keyword arguments. The keyword value can be a pattern string or
a compiled expression, or None to remove the operator::
qp = qparser.QueryParser("content", schema)
cp = qparser.OperatorsPlugin(And="&", Or="\\|", AndNot="&!",
AndMaybe="&~", Not=None)
qp.replace_plugin(cp)
You can also specify a list of ``(OpTagger, priority)`` pairs as the first
argument to the initializer to use custom operators. See :ref:`custom-op`
for more information on this.
"""
class OpTagger(RegexTagger):
def __init__(self, expr, grouptype, optype=syntax.InfixOperator,
leftassoc=True):
RegexTagger.__init__(self, expr)
self.grouptype = grouptype
self.optype = optype
self.leftassoc = leftassoc
def create(self, parser, match):
return self.optype(match.group(0), self.grouptype, self.leftassoc)
def __init__(self, ops=None, clean=False, And=r"\sAND\s", Or=r"\sOR\s",
AndNot=r"\sANDNOT\s", AndMaybe=r"\sANDMAYBE\s",
Not=r"(^|(?<= ))NOT\s", Require=r"(^|(?<= ))REQUIRE\s"):
if ops:
ops = list(ops)
else:
ops = []
if not clean:
ot = self.OpTagger
if Not:
ops.append((ot(Not, syntax.NotGroup, syntax.PrefixOperator),
0))
if And:
ops.append((ot(And, syntax.AndGroup), 0))
if Or:
ops.append((ot(Or, syntax.OrGroup), 0))
if AndNot:
ops.append((ot(AndNot, syntax.AndNotGroup), -5))
if AndMaybe:
ops.append((ot(AndMaybe, syntax.AndMaybeGroup), -5))
if Require:
ops.append((ot(Require, syntax.RequireGroup), 0))
self.ops = ops
def taggers(self, parser):
return self.ops
def filters(self, parser):
return [(self.do_operators, 600)]
def do_operators(self, parser, group):
"""This filter finds PrefixOperator, PostfixOperator, and InfixOperator
nodes in the tree and calls their logic to rearrange the nodes.
"""
for tagger, _ in self.ops:
# Get the operators created by the configured taggers
optype = tagger.optype
gtype = tagger.grouptype
# Left-associative infix operators are replaced left-to-right, and
# right-associative infix operators are replaced right-to-left.
# Most of the work is done in the different implementations of
# Operator.replace_self().
if tagger.leftassoc:
i = 0
while i < len(group):
t = group[i]
if isinstance(t, optype) and t.grouptype is gtype:
i = t.replace_self(parser, group, i)
else:
i += 1
else:
i = len(group) - 1
while i >= 0:
t = group[i]
if isinstance(t, optype):
i = t.replace_self(parser, group, i)
i -= 1
# Descend into the groups and recursively call do_operators
for i, t in enumerate(group):
if isinstance(t, syntax.GroupNode):
group[i] = self.do_operators(parser, t)
return group
#
class PlusMinusPlugin(Plugin):
"""Adds the ability to use + and - in a flat OR query to specify required
and prohibited terms.
This is the basis for the parser configuration returned by
``SimpleParser()``.
"""
# Marker nodes for + and -
class Plus(syntax.MarkerNode):
pass
class Minus(syntax.MarkerNode):
pass
def __init__(self, plusexpr="\\+", minusexpr="-"):
self.plusexpr = plusexpr
self.minusexpr = minusexpr
def taggers(self, parser):
return [(FnTagger(self.plusexpr, self.Plus), 0),
(FnTagger(self.minusexpr, self.Minus), 0)]
def filters(self, parser):
return [(self.do_plusminus, 510)]
def do_plusminus(self, parser, group):
"""This filter sorts nodes in a flat group into "required", "optional",
and "banned" subgroups based on the presence of plus and minus nodes.
"""
required = syntax.AndGroup()
optional = syntax.OrGroup()
banned = syntax.OrGroup()
# Which group to put the next node we see into
next = optional
for node in group:
if isinstance(node, self.Plus):
# +: put the next node in the required group
next = required
elif isinstance(node, self.Minus):
# -: put the next node in the banned group
next = banned
else:
# Anything else: put it in the appropriate group
next.append(node)
# Reset to putting things in the optional group by default
next = optional
group = optional
if required:
group = syntax.AndMaybeGroup([required, group])
if banned:
group = syntax.AndNotGroup([group, banned])
return group
class GtLtPlugin(TaggingPlugin):
"""Allows the user to use greater than/less than symbols to create range
queries::
        a:>100 b:<=z c:>=-1.4
    The plugin recognizes ``>``, ``<``, ``>=``, ``<=``, ``=>``, and ``=<``
after a field specifier. The field specifier is required. You cannot do the
following::
>100
This plugin requires the FieldsPlugin and RangePlugin to work.
"""
class GtLtNode(syntax.SyntaxNode):
def __init__(self, rel):
self.rel = rel
def __repr__(self):
return "(%s)" % self.rel
expr = r"(?P(<=|>=|<|>|=<|=>))"
nodetype = GtLtNode
def filters(self, parser):
        # Run before the fields filter removes FieldnameNodes at priority 100.
return [(self.do_gtlt, 99)]
def do_gtlt(self, parser, group):
"""This filter translate FieldnameNode/GtLtNode pairs into RangeNodes.
"""
fname = syntax.FieldnameNode
newgroup = group.empty_copy()
i = 0
lasti = len(group) - 1
while i < len(group):
node = group[i]
# If this is a GtLtNode...
if isinstance(node, self.GtLtNode):
# If it's not the last node in the group...
if i < lasti:
prevnode = newgroup[-1]
nextnode = group[i + 1]
# If previous was a fieldname and next node has text
if isinstance(prevnode, fname) and nextnode.has_text:
# Make the next node into a range based on the symbol
newgroup.append(self.make_range(nextnode, node.rel))
# Skip the next node
i += 1
else:
# If it's not a GtLtNode, add it to the filtered group
newgroup.append(node)
i += 1
return newgroup
def make_range(self, node, rel):
text = node.text
if rel == "<":
n = syntax.RangeNode(None, text, False, True)
elif rel == ">":
n = syntax.RangeNode(text, None, True, False)
elif rel == "<=" or rel == "=<":
n = syntax.RangeNode(None, text, False, False)
elif rel == ">=" or rel == "=>":
n = syntax.RangeNode(text, None, False, False)
return n.set_range(node.startchar, node.endchar)
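# Usage note (editor's addition): with the default QueryParser, which already
# includes FieldsPlugin and RangePlugin, adding GtLtPlugin lets a query such as
# "num:>=10" parse as an open-ended RangeNode on the hypothetical "num" field,
# roughly equivalent to writing "num:[10 to]".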
class MultifieldPlugin(Plugin):
"""Converts any unfielded terms into OR clauses that search for the
term in a specified list of fields.
>>> qp = qparser.QueryParser(None, myschema)
    >>> qp.add_plugin(qparser.MultifieldPlugin(["a", "b"]))
>>> qp.parse("alfa c:bravo")
And([Or([Term("a", "alfa"), Term("b", "alfa")]), Term("c", "bravo")])
This plugin is the basis for the ``MultifieldParser``.
"""
def __init__(self, fieldnames, fieldboosts=None, group=syntax.OrGroup):
"""
:param fieldnames: a list of fields to search.
:param fieldboosts: an optional dictionary mapping field names to
a boost to use for that field.
:param group: the group to use to relate the fielded terms to each
other.
"""
self.fieldnames = fieldnames
self.boosts = fieldboosts or {}
self.group = group
def filters(self, parser):
# Run after the fields filter applies explicit fieldnames (at priority
# 100)
return [(self.do_multifield, 110)]
def do_multifield(self, parser, group):
for i, node in enumerate(group):
if isinstance(node, syntax.GroupNode):
# Recurse inside groups
group[i] = self.do_multifield(parser, node)
elif node.has_fieldname and node.fieldname is None:
# For an unfielded node, create a new group containing fielded
# versions of the node for each configured "multi" field.
newnodes = []
for fname in self.fieldnames:
newnode = copy.copy(node)
newnode.set_fieldname(fname)
newnode.set_boost(self.boosts.get(fname, 1.0))
newnodes.append(newnode)
group[i] = self.group(newnodes)
return group
class FieldAliasPlugin(Plugin):
"""Adds the ability to use "aliases" of fields in the query string.
This plugin is useful for allowing users of languages that can't be
represented in ASCII to use field names in their own language, and
translate them into the "real" field names, which must be valid Python
identifiers.
>>> # Allow users to use 'body' or 'text' to refer to the 'content' field
>>> parser.add_plugin(FieldAliasPlugin({"content": ["body", "text"]}))
>>> parser.parse("text:hello")
Term("content", "hello")
"""
def __init__(self, fieldmap):
self.fieldmap = fieldmap
self.reverse = {}
for key, values in iteritems(fieldmap):
for value in values:
self.reverse[value] = key
def filters(self, parser):
return [(self.do_aliases, 90)]
def do_aliases(self, parser, group):
for i, node in enumerate(group):
if isinstance(node, syntax.GroupNode):
group[i] = self.do_aliases(parser, node)
elif node.has_fieldname and node.fieldname is not None:
fname = node.fieldname
if fname in self.reverse:
node.set_fieldname(self.reverse[fname], override=True)
return group
class CopyFieldPlugin(Plugin):
"""Looks for basic syntax nodes (terms, prefixes, wildcards, phrases, etc.)
occurring in a certain field and replaces it with a group (by default OR)
containing the original token and the token copied to a new field.
For example, the query::
hello name:matt
could be automatically converted by ``CopyFieldPlugin({"name", "author"})``
to::
hello (name:matt OR author:matt)
This is useful where one field was indexed with a differently-analyzed copy
of another, and you want the query to search both fields.
You can specify a different group type with the ``group`` keyword. You can
also specify ``group=None``, in which case the copied node is inserted
"inline" next to the original, instead of in a new group::
hello name:matt author:matt
"""
def __init__(self, map, group=syntax.OrGroup, mirror=False):
"""
:param map: a dictionary mapping names of fields to copy to the
names of the destination fields.
:param group: the type of group to create in place of the original
token. You can specify ``group=None`` to put the copied node
"inline" next to the original node instead of in a new group.
:param mirror: if True, the plugin copies both ways, so if the user
specifies a query in the 'toname' field, it will be copied to
the 'fromname' field.
"""
self.map = map
self.group = group
if mirror:
# Add in reversed mappings
map.update(dict((v, k) for k, v in iteritems(map)))
def filters(self, parser):
# Run after the fieldname filter (100) but before multifield (110)
return [(self.do_copyfield, 109)]
def do_copyfield(self, parser, group):
map = self.map
newgroup = group.empty_copy()
for node in group:
if isinstance(node, syntax.GroupNode):
# Recurse into groups
node = self.do_copyfield(parser, node)
elif node.has_fieldname:
fname = node.fieldname or parser.fieldname
if fname in map:
newnode = copy.copy(node)
newnode.set_fieldname(map[fname], override=True)
if self.group is None:
newgroup.append(node)
newgroup.append(newnode)
else:
newgroup.append(self.group([node, newnode]))
continue
newgroup.append(node)
return newgroup
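# Illustrative sketch (assumption: the schema has both "name" and "author"
# fields); demonstrates the inline form with group=None described in the
# docstring above.
def _copyfield_example(schema):
    from whoosh import qparser
    qp = qparser.QueryParser("content", schema)
    qp.add_plugin(qparser.CopyFieldPlugin({"name": "author"}, group=None))
    # Roughly equivalent to parsing "hello name:matt author:matt".
    return qp.parse("hello name:matt")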
class PseudoFieldPlugin(Plugin):
"""This is an advanced plugin that lets you define "pseudo-fields" the user
can use in their queries. When the parser encounters one of these fields,
it runs a given function on the following node in the abstract syntax tree.
Unfortunately writing the transform function(s) requires knowledge of the
parser's abstract syntax tree classes. A transform function takes a
:class:`whoosh.qparser.SyntaxNode` and returns a
:class:`~whoosh.qparser.SyntaxNode` (or None if the node should be removed
instead of transformed).
Some things you can do in the transform function::
from whoosh import qparser
def my_xform_fn(node):
# Is this a text node?
if node.has_text:
# Change the node's text
node.text = node.text + "foo"
# Change the node into a prefix query
node = qparser.PrefixPlugin.PrefixNode(node.text)
# Set the field the node should search in
node.set_fieldname("title")
return node
else:
# If the pseudo-field wasn't applied to a text node (e.g.
# it preceded a group, as in ``pfield:(a OR b)`` ), remove the
# node. Alternatively you could just ``return node`` here to
# leave the non-text node intact.
return None
In the following example, if the user types ``regex:foo.bar``, the function
transforms the text in the pseudo-field "regex" into a regular expression
query in the "content" field::
from whoosh import qparser
def regex_maker(node):
if node.has_text:
node = qparser.RegexPlugin.RegexNode(node.text)
node.set_fieldname("content")
return node
qp = qparser.QueryParser("content", myindex.schema)
qp.add_plugin(qparser.PseudoFieldPlugin({"regex": regex_maker}))
q = qp.parse("alfa regex:br.vo")
The name of the "pseudo" field can be the same as an actual field. Imagine
the schema has a field named ``reverse``, and you want the user to be able
to type ``reverse:foo`` and transform it to ``reverse:(foo OR oof)``::
def rev_text(node):
if node.has_text:
# Create a word node for the reversed text
revtext = node.text[::-1] # Reverse the text
rnode = qparser.WordNode(revtext)
# Put the original node and the reversed node in an OrGroup
group = qparser.OrGroup([node, rnode])
# Need to set the fieldname here because the PseudoFieldPlugin
# removes the field name syntax
group.set_fieldname("reverse")
return group
qp = qparser.QueryParser("content", myindex.schema)
qp.add_plugin(qparser.PseudoFieldPlugin({"reverse": rev_text}))
q = qp.parse("alfa reverse:bravo")
Note that transforming the query like this can potentially really confuse
the spell checker!
This plugin works as a filter, so it can only operate on the query after it
has been parsed into an abstract syntax tree. For parsing control (i.e. to
give a pseudo-field its own special syntax), you would need to write your
own parsing plugin.
"""
def __init__(self, xform_map):
"""
:param xform_map: a dictionary mapping pseudo-field names to transform
functions. The function should take a
:class:`whoosh.qparser.SyntaxNode` as an argument, and return a
:class:`~whoosh.qparser.SyntaxNode`. If the function returns None,
the node will be removed from the query.
"""
self.xform_map = xform_map
def filters(self, parser):
# Run before the fieldname filter (100)
return [(self.do_pseudofield, 99)]
def do_pseudofield(self, parser, group):
xform_map = self.xform_map
newgroup = group.empty_copy()
xform_next = None
for node in group:
if isinstance(node, syntax.GroupNode):
node = self.do_pseudofield(parser, node)
elif (isinstance(node, syntax.FieldnameNode)
and node.fieldname in xform_map):
xform_next = xform_map[node.fieldname]
continue
if xform_next:
newnode = xform_next(node)
xform_next = None
if newnode is None:
continue
else:
newnode.set_range(node.startchar, node.endchar)
node = newnode
newgroup.append(node)
return newgroup
"""Exports proof logs to OCaml files to be loaded by HOL Light.
Processes multiple proof logs, but can generate at most one proof per theorem.
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import tensorflow as tf
from typing import Dict, Iterable, List, Text
from deepmath.deephol import deephol_pb2
from deepmath.deephol.utilities import proof_analysis
from deepmath.deephol import theorem_fingerprint
from deepmath.proof_assistant import proof_assistant_pb2
class ProofFailedError(Exception):
pass
def put_in_quotes(s: Text):
return '"%s"' % s
def _tactic_string_to_ocaml(tactic_string: Text) -> Text:
return 'Parse_tactic.parse ' + put_in_quotes(tactic_string)
def tactic_application_to_string(t_app: deephol_pb2.TacticApplication) -> Text:
"""Generate tactic strings.
Args:
t_app: TacticApplication proto
Returns:
tactic string; to be parsed by third_party/hol_light/parse_tactic.ml
Raises:
ProofFailedError: When invariants of the tactic application are not met.
"""
tactic_str = str(t_app.tactic)
for i, param in enumerate(t_app.parameters):
tactic_str += ' '
if param.parameter_type == deephol_pb2.Tactic.UNKNOWN:
if not param.unknown:
raise ProofFailedError(
'No (or empty) parameter UNKNOWN given for parameter '
'index %d of tactic %s' % (i, t_app.tactic))
tactic_str += str(param.unknown)
elif param.parameter_type == deephol_pb2.Tactic.TERM:
if not param.term:
raise ProofFailedError('Tactic %s expected term at parameter index %d' %
(t_app.tactic, i))
tactic_str += str(param.term)
elif param.parameter_type == deephol_pb2.Tactic.THEOREM:
if not param.theorems or len(param.theorems) != 1:
raise ProofFailedError(
'Tactic %s expected single theorem at parameter index %d' %
(t_app.tactic, i))
tactic_str += theorem_fingerprint.ToTacticArgument(param.theorems[0])
elif param.parameter_type == deephol_pb2.Tactic.THEOREM_LIST:
if not param.theorems:
tactic_str += '[ ]'
else:
tactic_str += str('[ %s ]' % ' ; '.join([
theorem_fingerprint.ToTacticArgument(thm) for thm in param.theorems
]))
else:
raise ProofFailedError('Unsupported param type: %s' %
str(param.parameter_type))
return tactic_str
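# A minimal, hypothetical sketch of how the helper above might be exercised.
# The field names are taken from the code in this module; the exact shape of
# the deephol_pb2 protos (e.g. that `parameters` is a repeated message field)
# is an assumption and has not been checked against the .proto definition.
def _example_tactic_string() -> Text:
  t_app = deephol_pb2.TacticApplication()
  t_app.tactic = 'EXISTS_TAC'
  param = t_app.parameters.add()  # assumed repeated message field
  param.parameter_type = deephol_pb2.Tactic.TERM
  param.term = '`x:num`'
  # Expected to yield the string "EXISTS_TAC `x:num`".
  return tactic_application_to_string(t_app)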
def proof_log_as_dict(log: deephol_pb2.ProofLog
) -> Dict[int, deephol_pb2.ProofNode]:
"""Turns proof log into a dictionary."""
d = {}
for node in log.nodes:
fingerprint = theorem_fingerprint.Fingerprint(node.goal)
if fingerprint in d:
raise ValueError('Duplicate subgoal fingerprint in proof log.')
d[fingerprint] = node
return d
def proof_linearization(proof_log: deephol_pb2.ProofLog
) -> List[deephol_pb2.TacticApplication]:
"""Turns a proof into a list of tactic applications."""
if not proof_log.HasField('theorem_in_database'):
raise ValueError('Proof log requires field theorem_in_database')
node_dict = proof_log_as_dict(proof_log)
fingerprint = theorem_fingerprint.Fingerprint(proof_log.theorem_in_database)
if fingerprint not in node_dict:
raise ValueError(
'Fingerprint of proof_log.theorem_in_database missing in the proof log.'
)
# Compute a linearization of the tactic applications in left-first order.
tactics = []
open_goals = [proof_log.theorem_in_database]
visited = set()
while open_goals:
goal = open_goals.pop()
fingerprint = theorem_fingerprint.Fingerprint(goal)
if fingerprint in visited:
raise ProofFailedError('Cycle detected!')
visited.add(fingerprint)
try:
proofnode = node_dict[fingerprint]
except KeyError:
raise ProofFailedError('Subgoal not found in proof log: %s.' % str(goal))
if not proofnode.proofs:
raise ProofFailedError('No tactic app found for goal %s' % str(goal))
if len(proofnode.proofs) > 1:
tf.logging.warning('Multiple proofs detected for goal; ignoring all but '
'the first one.')
tactic_application = proofnode.proofs[0] # only checking the first one
tactics.append(tactic_application)
subgoals = list(tactic_application.subgoals) # create a copy
subgoals.reverse() # to enable getting next goal with subgoals.pop()
open_goals.extend(subgoals)
return tactics
def ocaml_proof(proof_log: deephol_pb2.ProofLog) -> List[Text]:
"""Turns a proof log into OCaml code.
Args:
proof_log: Must contain exactly one proof of the given theorem.
Returns:
OCaml code for the proof.
Raises:
ProofFailedError: If an error in the proof is detected.
ValueError: If an error in the checking logic is detected.
"""
if not proof_log.HasField('theorem_in_database'):
raise ValueError('Expected field proof_log.theorem_in_database to be set.')
theorem = proof_log.theorem_in_database
lines = ['']
if theorem.pretty_printed:
# Quotes around the expression are necessary to avoid
# interpretation of '(*' and '*)' as nested comments.
lines.append('(* "%s" *)' % theorem.pretty_printed)
lines.append('')
tactics = proof_linearization(proof_log)
ocaml_parsed_tactics = [
_tactic_string_to_ocaml(tactic_application_to_string(tactic))
for tactic in tactics
]
proof = ' THEN\n '.join(ocaml_parsed_tactics)
quoted_hypotheses = map(put_in_quotes, theorem.hypotheses)
wrapped_proof = 'fun () ->\n decode_goal [%s] "%s",\n %s' % (
'; '.join(quoted_hypotheses), theorem.conclusion, proof)
in_core = 'true' if 'core' in theorem.library_tag else 'false'
lines.append('register_proof %d (\n %s) %s;;' %
(theorem.goal_fingerprint, wrapped_proof, in_core))
return lines
def ocaml_proof_header():
"""Creates the prelude to the OCaml file; enabling the proofs to be loaded."""
return [
'set_jrh_lexer;;', 'open Lib;;', 'open Printer;;',
'open Theorem_fingerprint;;', 'open Import_proofs;;', 'open Tactics;;',
'', 'Printer.current_encoding := Printer.Sexp;;', ''
]
def verify(proof_logs: Iterable[deephol_pb2.ProofLog],
theorem_database: proof_assistant_pb2.TheoremDatabase) -> Text:
"""Generates an OCaml file of proofs for HOL Light to replay.
Args:
proof_logs: Proofs to be checked; assumes the top theorem is the first node
of each proof log, and that there is at most one proof log for each
theorem.
theorem_database: list of theorems and definitions
Returns:
An OCaml file as string.
Raises:
ValueError: If the proof logs could not be converted to OCaml.
"""
proof_logs_processed = 0
proof_logs_with_closed_proofs = 0
proof_logs_without_proof = 0
theorems_with_closed_proofs = 0
successful_proofs = 0
failed_proofs = 0
missing_proofs = 0
missing_in_database = 0
duplicate_proofs = 0
# Prepare theorem database for efficient lookup
theorem_database_fingerprints = {
theorem_fingerprint.Fingerprint(t) for t in theorem_database.theorems
}
# Count closed proofs in proof log and index by fingerprint of theorems
proof_logs_dict = {}
for log in proof_logs:
proof_logs_processed += 1
if not log.nodes or log.nodes[0].status != deephol_pb2.ProofNode.PROVED:
proof_logs_without_proof += 1
continue
proof_logs_with_closed_proofs += 1
# Ensure consistency of log.nodes[0] and log.theorem_in_database
node0_is_thm = log.nodes[0].goal.tag == proof_assistant_pb2.Theorem.THEOREM
if not node0_is_thm and not log.HasField('theorem_in_database'):
raise ValueError('Not sure which theorem this log proves.')
if not log.HasField('theorem_in_database'):
log.theorem_in_database.CopyFrom(log.nodes[0].goal)
# Start the actual loop logic
fingerprint = theorem_fingerprint.Fingerprint(log.theorem_in_database)
if fingerprint in proof_logs_dict:
tf.logging.warning(
'Can generate at most one OCaml proof per theorem. '
'Detected an additional proof for fingerprint %d.\n\n%s',
fingerprint, str(log.nodes[0].goal))
duplicate_proofs += 1
continue
proof_logs_dict[fingerprint] = log
theorems_with_closed_proofs += 1
if fingerprint not in theorem_database_fingerprints:
missing_in_database += 1
# MAIN LOOP
lines = ocaml_proof_header()
for theorem in theorem_database.theorems:
# Find theorem and its proof in the proof logs
fingerprint = theorem_fingerprint.Fingerprint(theorem)
try:
proof_log = proof_logs_dict[fingerprint]
except KeyError:
continue
try:
# Extract a single proof from the proof log
extracted = proof_analysis.extract_proof(proof_log)
if not extracted:
raise ValueError('Proof log claims a closed proof for '
'fingerprint %d, but no proof could be '
'extracted' % fingerprint)
lines.extend(ocaml_proof(extracted))
successful_proofs += 1
except ProofFailedError as e:
tf.logging.error('Proof of %s failed: %s',
theorem_fingerprint.ToTacticArgument(theorem), str(e))
failed_proofs += 1
# Detailed stats
tf.logging.info('PROOF LOG STATS')
tf.logging.info('Proof logs processed: %d', proof_logs_processed)
tf.logging.info('Proof logs without proofs: %d', proof_logs_without_proof)
tf.logging.info('Proof logs with closed proofs: %d',
proof_logs_with_closed_proofs)
tf.logging.info('PROOF STATS')
tf.logging.info('Successful proofs: %d', successful_proofs)
tf.logging.info('Missing proofs: %d', missing_proofs)
tf.logging.info('Failed proofs: %d', failed_proofs)
tf.logging.info('Theorems with proofs in proof logs: %d',
theorems_with_closed_proofs)
if duplicate_proofs:
tf.logging.warning('Proofs in proof logs that were ignored: %d',
duplicate_proofs)
if missing_in_database:
tf.logging.warning(
'Found proofs for %d theorems that are not in the theorem database.',
missing_in_database)
if successful_proofs + failed_proofs != theorems_with_closed_proofs:
raise ValueError('Internal error in the proof checker. Number of theorems '
'checked did not match the proof log.')
if successful_proofs < theorems_with_closed_proofs or failed_proofs > 0:
tf.logging.warning('Proof log could NOT be verified.')
return '\n'.join(lines)
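# A minimal, hypothetical usage sketch (not part of the original module): it
# assumes the proof logs and the theorem database have already been parsed into
# deephol_pb2.ProofLog / proof_assistant_pb2.TheoremDatabase protos elsewhere,
# and simply writes the generated OCaml source to a file.
def write_ocaml_proofs(proof_logs, theorem_database, out_path):
  ocaml_source = verify(proof_logs, theorem_database)
  with open(out_path, 'w') as f:
    f.write(ocaml_source)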
# (c) 2012-2014, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.task_executor import TaskExecutor
from ansible.playbook.play_context import PlayContext
from ansible.plugins import action_loader, lookup_loader
from ansible.parsing.yaml.objects import AnsibleUnicode
from units.mock.loader import DictDataLoader
class TestTaskExecutor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_task_executor_init(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
mock_queue = MagicMock()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
rslt_q = mock_queue,
)
def test_task_executor_run(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task._role._role_path = '/path/to/role/foo'
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_queue = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
rslt_q = mock_queue,
)
te._get_loop_items = MagicMock(return_value=None)
te._execute = MagicMock(return_value=dict())
res = te.run()
te._get_loop_items = MagicMock(return_value=[])
res = te.run()
te._get_loop_items = MagicMock(return_value=['a','b','c'])
te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
res = te.run()
te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
res = te.run()
self.assertIn("failed", res)
def test_task_executor_get_loop_items(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.loop = 'items'
mock_task.loop_args = ['a', 'b', 'c']
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_shared_loader.lookup_loader = lookup_loader
new_stdin = None
job_vars = dict()
mock_queue = MagicMock()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
rslt_q = mock_queue,
)
items = te._get_loop_items()
self.assertEqual(items, ['a', 'b', 'c'])
def test_task_executor_run_loop(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
def _copy(exclude_parent=False, exclude_tasks=False):
new_item = MagicMock()
return new_item
mock_task = MagicMock()
mock_task.copy.side_effect = _copy
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_queue = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
rslt_q = mock_queue,
)
def _execute(variables):
return dict(item=variables.get('item'))
te._squash_items = MagicMock(return_value=items)
te._execute = MagicMock(side_effect=_execute)
res = te._run_loop(items)
self.assertEqual(len(res), 3)
def test_task_executor_squash_items(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
loop_var = 'item'
def _evaluate_conditional(templar, variables):
item = variables.get(loop_var)
if item == 'b':
return False
return True
mock_task = MagicMock()
mock_task.evaluate_conditional.side_effect = _evaluate_conditional
mock_play_context = MagicMock()
mock_shared_loader = None
mock_queue = MagicMock()
new_stdin = None
job_vars = dict(pkg_mgr='yum')
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
rslt_q = mock_queue,
)
#
# No replacement
#
mock_task.action = 'yum'
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertIsInstance(mock_task.args, MagicMock)
mock_task.action = 'foo'
mock_task.args={'name': '{{item}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': '{{item}}'})
mock_task.action = 'yum'
mock_task.args={'name': 'static'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': 'static'})
mock_task.action = 'yum'
mock_task.args={'name': '{{pkg_mgr}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': '{{pkg_mgr}}'})
mock_task.action = '{{unknown}}'
mock_task.args={'name': '{{item}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
self.assertEqual(mock_task.args, {'name': '{{item}}'})
# Could do something like this to recover from bad deps in a package
job_vars = dict(pkg_mgr='yum', packages=['a', 'b'])
items = [ 'absent', 'latest' ]
mock_task.action = 'yum'
mock_task.args = {'name': '{{ packages }}', 'state': '{{ item }}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{ packages }}', 'state': '{{ item }}'})
# Maybe should raise an error in this case. The user would have to specify:
# - yum: name="{{ packages[item] }}"
# with_items:
# - ['a', 'b']
# - ['foo', 'bar']
# you can't use a list as a dict key so that would probably throw
# an error later. If so, we can throw it now instead.
# Squashing in this case would not be intuitive as the user is being
# explicit in using each list entry as a key.
job_vars = dict(pkg_mgr='yum', packages={ "a": "foo", "b": "bar", "foo": "baz", "bar": "quux" })
items = [['a', 'b'], ['foo', 'bar']]
mock_task.action = 'yum'
mock_task.args = {'name': '{{ packages[item] }}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'})
#
# Replaces
#
items = ['a', 'b', 'c']
mock_task.action = 'yum'
mock_task.args={'name': '{{item}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, [['a','c']])
self.assertEqual(mock_task.args, {'name': ['a','c']})
mock_task.action = '{{pkg_mgr}}'
mock_task.args={'name': '{{item}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
self.assertEqual(new_items, [['a', 'c']])
self.assertEqual(mock_task.args, {'name': ['a','c']})
# New loop_var
mock_task.action = 'yum'
mock_task.args = {'name': '{{a_loop_var_item}}'}
mock_task.loop_control = {'loop_var': 'a_loop_var_item'}
loop_var = 'a_loop_var_item'
new_items = te._squash_items(items=items, loop_var='a_loop_var_item', variables=job_vars)
self.assertEqual(new_items, [['a', 'c']])
self.assertEqual(mock_task.args, {'name': ['a','c']})
loop_var = 'item'
#
# These are presently not optimized but could be in the future.
# Expected output if they were optimized is given as a comment
# Please move these to a different section if they are optimized
#
# Squashing lists
job_vars = dict(pkg_mgr='yum')
items = [['a', 'b'], ['foo', 'bar']]
mock_task.action = 'yum'
mock_task.args = {'name': '{{ item }}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
#self.assertEqual(new_items, [['a', 'b', 'foo', 'bar']])
#self.assertEqual(mock_task.args, {'name': ['a', 'b', 'foo', 'bar']})
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{ item }}'})
# Retrieving from a dict
items = ['a', 'b', 'foo']
mock_task.action = 'yum'
mock_task.args = {'name': '{{ packages[item] }}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
#self.assertEqual(new_items, [['foo', 'baz']])
#self.assertEqual(mock_task.args, {'name': ['foo', 'baz']})
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'})
# Another way to retrieve from a dict
job_vars = dict(pkg_mgr='yum')
items = [{'package': 'foo'}, {'package': 'bar'}]
mock_task.action = 'yum'
mock_task.args = {'name': '{{ item["package"] }}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
#self.assertEqual(new_items, [['foo', 'bar']])
#self.assertEqual(mock_task.args, {'name': ['foo', 'bar']})
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{ item["package"] }}'})
items = [dict(name='a', state='present'),
dict(name='b', state='present'),
dict(name='c', state='present')]
mock_task.action = 'yum'
mock_task.args={'name': '{{item.name}}', 'state': '{{item.state}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
#self.assertEqual(new_items, [dict(name=['a', 'b', 'c'], state='present')])
#self.assertEqual(mock_task.args, {'name': ['a', 'b', 'c'], 'state': 'present'})
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
items = [dict(name='a', state='present'),
dict(name='b', state='present'),
dict(name='c', state='absent')]
mock_task.action = 'yum'
mock_task.args={'name': '{{item.name}}', 'state': '{{item.state}}'}
new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
#self.assertEqual(new_items, [dict(name=['a', 'b'], state='present'),
# dict(name='c', state='absent')])
#self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
self.assertEqual(new_items, items)
self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
def test_task_executor_execute(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.args = dict()
mock_task.retries = 0
mock_task.delay = -1
mock_task.register = 'foo'
mock_task.until = None
mock_task.changed_when = None
mock_task.failed_when = None
mock_task.post_validate.return_value = None
# mock_task.async cannot be left unset, because on Python 3 MagicMock() > 0
# raises a TypeError. There are two reasons for using the value 1 here: on
# Python 2 comparing MagicMock() > 0 returns True, and the other reason is
# that if I specify 0 here, the test fails. ;)
mock_task.async = 1
mock_task.poll = 0
mock_play_context = MagicMock()
mock_play_context.post_validate.return_value = None
mock_play_context.update_vars.return_value = None
mock_connection = MagicMock()
mock_connection.set_host_overrides.return_value = None
mock_connection._connect.return_value = None
mock_action = MagicMock()
mock_queue = MagicMock()
shared_loader = None
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = shared_loader,
rslt_q = mock_queue,
)
te._get_connection = MagicMock(return_value=mock_connection)
te._get_action_handler = MagicMock(return_value=mock_action)
mock_action.run.return_value = dict(ansible_facts=dict())
res = te._execute()
mock_task.changed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
res = te._execute()
mock_task.changed_when = None
mock_task.failed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
res = te._execute()
mock_task.failed_when = None
mock_task.evaluate_conditional.return_value = False
res = te._execute()
mock_task.evaluate_conditional.return_value = True
mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
mock_task.action = 'include'
res = te._execute()
def test_task_executor_poll_async_result(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.async = 0.1
mock_task.poll = 0.05
mock_play_context = MagicMock()
mock_connection = MagicMock()
mock_action = MagicMock()
mock_queue = MagicMock()
shared_loader = MagicMock()
shared_loader.action_loader = action_loader
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = shared_loader,
rslt_q = mock_queue,
)
te._connection = MagicMock()
def _get(*args, **kwargs):
mock_action = MagicMock()
mock_action.run.return_value = dict(stdout='')
return mock_action
# testing with some bad values in the result passed to poll async,
# and with a bad value returned from the mock action
with patch.object(action_loader, 'get', _get):
mock_templar = MagicMock()
res = te._poll_async_result(result=dict(), templar=mock_templar)
self.assertIn('failed', res)
res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
self.assertIn('failed', res)
def _get(*args, **kwargs):
mock_action = MagicMock()
mock_action.run.return_value = dict(finished=1)
return mock_action
# now testing with good values
with patch.object(action_loader, 'get', _get):
mock_templar = MagicMock()
res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
self.assertEqual(res, dict(finished=1))
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests text-emphasis-position-property-001 ~ 006
which cover all possible values of the text-emphasis-position property with
all combinations of the three main writing modes and two orientations. Only
test files are generated by this script. It also outputs a list of all
tests it generated, in Mozilla reftest.list format, to stdout.
"""
from __future__ import unicode_literals
import itertools
TEST_FILE = 'text-emphasis-position-property-{:03}{}.html'
REF_FILE = 'text-emphasis-position-property-{:03}-ref.html'
TEST_TEMPLATE = '''
CSS Test: text-emphasis-position: {value}, {title}
Pass if the emphasis marks are {position} the text below:
試験テスト
'''
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e', 'f', 'g']
WRITING_MODES = ["horizontal-tb", "vertical-rl", "vertical-lr"]
POSITION_HORIZONTAL = ["over", "under"]
POSITION_VERTICAL = ["right", "left"]
REF_MAP_MIXED = { "over": 1, "under": 2, "right": 3, "left": 4 }
REF_MAP_SIDEWAYS = { "right": 5, "left": 6 }
POSITION_TEXT = { "over": "over", "under": "under",
"right": "to the right of", "left": "to the left of" }
suffixes = [iter(SUFFIXES) for i in range(6)]
reftest_items = []
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
def write_test_file(idx, suffix, wm, orient, value, position):
filename = TEST_FILE.format(idx, suffix)
write_file(filename, TEST_TEMPLATE.format(
value=value, wm=wm, orient=orient, index=idx, position=position,
title=(wm if orient == "mixed" else "{}, {}".format(wm, orient))))
reftest_items.append("== {} {}".format(filename, REF_FILE.format(idx)))
def write_test_files(wm, orient, pos1, pos2):
idx = (REF_MAP_MIXED if orient == "mixed" else REF_MAP_SIDEWAYS)[pos1]
position = POSITION_TEXT[pos1]
suffix = suffixes[idx - 1]
write_test_file(idx, next(suffix), wm, orient, pos1 + " " + pos2, position)
write_test_file(idx, next(suffix), wm, orient, pos2 + " " + pos1, position)
for wm in WRITING_MODES:
if wm == "horizontal-tb":
effective_pos = POSITION_HORIZONTAL
ineffective_pos = POSITION_VERTICAL
else:
effective_pos = POSITION_VERTICAL
ineffective_pos = POSITION_HORIZONTAL
for pos1, pos2 in itertools.product(effective_pos, ineffective_pos):
write_test_files(wm, "mixed", pos1, pos2)
if wm != "horizontal-tb":
write_test_files(wm, "sideways", pos1, pos2)
print("# START tests from {}".format(__file__))
reftest_items.sort()
for item in reftest_items:
print(item)
print("# END tests from {}".format(__file__))
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains container classes to represent different protocol buffer types.
This file defines container classes which represent categories of protocol
buffer field types which need extra maintenance. Currently these categories
are:
- Repeated scalar fields - These are all repeated fields which aren't
composite (e.g. they are of simple types like int32, string, etc).
- Repeated composite fields - Repeated fields which are composite. This
includes groups and nested messages.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class BaseContainer(object):
"""Base container class."""
# Minimizes memory usage and disallows assignment to other attributes.
__slots__ = ['_message_listener', '_values']
def __init__(self, message_listener):
"""
Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
TransitionToNonempty() method when it transitions from being empty to
being nonempty.
"""
self._message_listener = message_listener
self._values = []
def __getitem__(self, key):
"""Retrieves item by the specified key."""
return self._values[key]
def __len__(self):
"""Returns the number of elements in the container."""
return len(self._values)
def __ne__(self, other):
"""Checks if another instance isn't equal to this one."""
# The concrete classes should define __eq__.
return not self == other
class RepeatedScalarFieldContainer(BaseContainer):
"""Simple, type-checked, list-like container for holding repeated scalars."""
# Disallows assignment to other attributes.
__slots__ = ['_type_checker']
def __init__(self, message_listener, type_checker):
"""
Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
TransitionToNonempty() method when it transitions from being empty to
being nonempty.
type_checker: A type_checkers.ValueChecker instance to run on elements
inserted into this container.
"""
super(RepeatedScalarFieldContainer, self).__init__(message_listener)
self._type_checker = type_checker
def append(self, value):
"""Appends an item to the list. Similar to list.append()."""
self.insert(len(self._values), value)
def insert(self, key, value):
"""Inserts the item at the specified position. Similar to list.insert()."""
self._type_checker.CheckValue(value)
self._values.insert(key, value)
self._message_listener.ByteSizeDirty()
if len(self._values) == 1:
self._message_listener.TransitionToNonempty()
def extend(self, elem_seq):
"""Extends by appending the given sequence. Similar to list.extend()."""
if not elem_seq:
return
orig_empty = len(self._values) == 0
new_values = []
for elem in elem_seq:
self._type_checker.CheckValue(elem)
new_values.append(elem)
self._values.extend(new_values)
self._message_listener.ByteSizeDirty()
if orig_empty:
self._message_listener.TransitionToNonempty()
def remove(self, elem):
"""Removes an item from the list. Similar to list.remove()."""
self._values.remove(elem)
self._message_listener.ByteSizeDirty()
def __setitem__(self, key, value):
"""Sets the item on the specified position."""
# No need to call TransitionToNonempty(), since if we're able to
# set the element at this index, we were already nonempty before
# this method was called.
self._message_listener.ByteSizeDirty()
self._type_checker.CheckValue(value)
self._values[key] = value
def __getslice__(self, start, stop):
"""Retrieves the subset of items from between the specified indices."""
return self._values[start:stop]
def __setslice__(self, start, stop, values):
"""Sets the subset of items from between the specified indices."""
new_values = []
for value in values:
self._type_checker.CheckValue(value)
new_values.append(value)
self._values[start:stop] = new_values
self._message_listener.ByteSizeDirty()
def __delitem__(self, key):
"""Deletes the item at the specified position."""
del self._values[key]
self._message_listener.ByteSizeDirty()
def __delslice__(self, start, stop):
"""Deletes the subset of items from between the specified indices."""
del self._values[start:stop]
self._message_listener.ByteSizeDirty()
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
# Special case for the same type which should be common and fast.
if isinstance(other, self.__class__):
return other._values == self._values
# We are presumably comparing against some other sequence type.
return other == self._values
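# An illustrative, self-contained sketch (not from the original file): real
# protobuf messages construct these containers internally, so the listener and
# type checker below are stand-ins that only satisfy the interface used above.
class _NoopListener(object):
  """Minimal MessageListener stand-in for the sketch below."""
  def ByteSizeDirty(self):
    pass
  def TransitionToNonempty(self):
    pass

class _IntChecker(object):
  """Minimal ValueChecker stand-in that only accepts ints."""
  def CheckValue(self, value):
    if not isinstance(value, int):
      raise TypeError('int expected, got %r' % (value,))

def _repeated_scalar_demo():
  values = RepeatedScalarFieldContainer(_NoopListener(), _IntChecker())
  values.extend([1, 2, 3])
  values.append(4)
  return list(values)  # [1, 2, 3, 4]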
class RepeatedCompositeFieldContainer(BaseContainer):
"""Simple, list-like container for holding repeated composite fields."""
# Disallows assignment to other attributes.
__slots__ = ['_message_descriptor']
def __init__(self, message_listener, message_descriptor):
"""
Note that we pass in a descriptor instead of the generated class directly,
since at the time we construct a _RepeatedCompositeFieldContainer we
haven't yet necessarily initialized the type that will be contained in the
container.
Args:
message_listener: A MessageListener implementation.
The RepeatedCompositeFieldContainer will call this object's
TransitionToNonempty() method when it transitions from being empty to
being nonempty.
message_descriptor: A Descriptor instance describing the protocol type
that should be present in this container. We'll use the
_concrete_class field of this descriptor when the client calls add().
"""
super(RepeatedCompositeFieldContainer, self).__init__(message_listener)
self._message_descriptor = message_descriptor
def add(self):
new_element = self._message_descriptor._concrete_class()
new_element._SetListener(self._message_listener)
self._values.append(new_element)
self._message_listener.ByteSizeDirty()
self._message_listener.TransitionToNonempty()
return new_element
def __getslice__(self, start, stop):
"""Retrieves the subset of items from between the specified indices."""
return self._values[start:stop]
def __delitem__(self, key):
"""Deletes the item at the specified position."""
del self._values[key]
self._message_listener.ByteSizeDirty()
def __delslice__(self, start, stop):
"""Deletes the subset of items from between the specified indices."""
del self._values[start:stop]
self._message_listener.ByteSizeDirty()
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if not isinstance(other, self.__class__):
raise TypeError('Can only compare repeated composite fields against '
'other repeated composite fields.')
return self._values == other._values
import pytest
from pybind11_tests import stl as m
from pybind11_tests import UserType
from pybind11_tests import ConstructorStats
def test_vector(doc):
"""std::vector <-> list"""
lst = m.cast_vector()
assert lst == [1]
lst.append(2)
assert m.load_vector(lst)
assert m.load_vector(tuple(lst))
assert m.cast_bool_vector() == [True, False]
assert m.load_bool_vector([True, False])
assert doc(m.cast_vector) == "cast_vector() -> List[int]"
assert doc(m.load_vector) == "load_vector(arg0: List[int]) -> bool"
# Test regression caused by 936: pointers to stl containers weren't castable
assert m.cast_ptr_vector() == ["lvalue", "lvalue"]
def test_deque(doc):
"""std::deque <-> list"""
lst = m.cast_deque()
assert lst == [1]
lst.append(2)
assert m.load_deque(lst)
assert m.load_deque(tuple(lst))
def test_array(doc):
"""std::array <-> list"""
lst = m.cast_array()
assert lst == [1, 2]
assert m.load_array(lst)
assert doc(m.cast_array) == "cast_array() -> List[int[2]]"
assert doc(m.load_array) == "load_array(arg0: List[int[2]]) -> bool"
def test_valarray(doc):
"""std::valarray <-> list"""
lst = m.cast_valarray()
assert lst == [1, 4, 9]
assert m.load_valarray(lst)
assert doc(m.cast_valarray) == "cast_valarray() -> List[int]"
assert doc(m.load_valarray) == "load_valarray(arg0: List[int]) -> bool"
def test_map(doc):
"""std::map <-> dict"""
d = m.cast_map()
assert d == {"key": "value"}
assert "key" in d
d["key2"] = "value2"
assert "key2" in d
assert m.load_map(d)
assert doc(m.cast_map) == "cast_map() -> Dict[str, str]"
assert doc(m.load_map) == "load_map(arg0: Dict[str, str]) -> bool"
def test_set(doc):
"""std::set <-> set"""
s = m.cast_set()
assert s == {"key1", "key2"}
s.add("key3")
assert m.load_set(s)
assert doc(m.cast_set) == "cast_set() -> Set[str]"
assert doc(m.load_set) == "load_set(arg0: Set[str]) -> bool"
def test_recursive_casting():
"""Tests that stl casters preserve lvalue/rvalue context for container values"""
assert m.cast_rv_vector() == ["rvalue", "rvalue"]
assert m.cast_lv_vector() == ["lvalue", "lvalue"]
assert m.cast_rv_array() == ["rvalue", "rvalue", "rvalue"]
assert m.cast_lv_array() == ["lvalue", "lvalue"]
assert m.cast_rv_map() == {"a": "rvalue"}
assert m.cast_lv_map() == {"a": "lvalue", "b": "lvalue"}
assert m.cast_rv_nested() == [[[{"b": "rvalue", "c": "rvalue"}], [{"a": "rvalue"}]]]
assert m.cast_lv_nested() == {
"a": [[["lvalue", "lvalue"]], [["lvalue", "lvalue"]]],
"b": [[["lvalue", "lvalue"], ["lvalue", "lvalue"]]]
}
# Issue #853 test case:
z = m.cast_unique_ptr_vector()
assert z[0].value == 7 and z[1].value == 42
def test_move_out_container():
"""Properties use the `reference_internal` policy by default. If the underlying function
returns an rvalue, the policy is automatically changed to `move` to avoid referencing
a temporary. In case the return value is a container of user-defined types, the policy
also needs to be applied to the elements, not just the container."""
c = m.MoveOutContainer()
moved_out_list = c.move_list
assert [x.value for x in moved_out_list] == [0, 1, 2]
@pytest.mark.skipif(not hasattr(m, "has_optional"), reason='no <optional>')
def test_optional():
assert m.double_or_zero(None) == 0
assert m.double_or_zero(42) == 84
pytest.raises(TypeError, m.double_or_zero, 'foo')
assert m.half_or_none(0) is None
assert m.half_or_none(42) == 21
pytest.raises(TypeError, m.half_or_none, 'foo')
assert m.test_nullopt() == 42
assert m.test_nullopt(None) == 42
assert m.test_nullopt(42) == 42
assert m.test_nullopt(43) == 43
assert m.test_no_assign() == 42
assert m.test_no_assign(None) == 42
assert m.test_no_assign(m.NoAssign(43)) == 43
pytest.raises(TypeError, m.test_no_assign, 43)
assert m.nodefer_none_optional(None)
@pytest.mark.skipif(not hasattr(m, "has_exp_optional"), reason='no <experimental/optional>')
def test_exp_optional():
assert m.double_or_zero_exp(None) == 0
assert m.double_or_zero_exp(42) == 84
pytest.raises(TypeError, m.double_or_zero_exp, 'foo')
assert m.half_or_none_exp(0) is None
assert m.half_or_none_exp(42) == 21
pytest.raises(TypeError, m.half_or_none_exp, 'foo')
assert m.test_nullopt_exp() == 42
assert m.test_nullopt_exp(None) == 42
assert m.test_nullopt_exp(42) == 42
assert m.test_nullopt_exp(43) == 43
assert m.test_no_assign_exp() == 42
assert m.test_no_assign_exp(None) == 42
assert m.test_no_assign_exp(m.NoAssign(43)) == 43
pytest.raises(TypeError, m.test_no_assign_exp, 43)
@pytest.mark.skipif(not hasattr(m, "load_variant"), reason='no <variant>')
def test_variant(doc):
assert m.load_variant(1) == "int"
assert m.load_variant("1") == "std::string"
assert m.load_variant(1.0) == "double"
assert m.load_variant(None) == "std::nullptr_t"
assert m.load_variant_2pass(1) == "int"
assert m.load_variant_2pass(1.0) == "double"
assert m.cast_variant() == (5, "Hello")
assert doc(m.load_variant) == "load_variant(arg0: Union[int, str, float, None]) -> str"
def test_vec_of_reference_wrapper():
"""#171: Can't return reference wrappers (or STL structures containing them)"""
assert str(m.return_vec_of_reference_wrapper(UserType(4))) == \
"[UserType(1), UserType(2), UserType(3), UserType(4)]"
def test_stl_pass_by_pointer(msg):
"""Passing nullptr or None to an STL container pointer is not expected to work"""
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer() # default value is `nullptr`
assert msg(excinfo.value) == """
stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
1. (v: List[int] = None) -> List[int]
Invoked with:
""" # noqa: E501 line too long
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer(None)
assert msg(excinfo.value) == """
stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:
1. (v: List[int] = None) -> List[int]
Invoked with: None
""" # noqa: E501 line too long
assert m.stl_pass_by_pointer([1, 2, 3]) == [1, 2, 3]
def test_missing_header_message():
"""Trying to convert `list` to a `std::vector`, or vice versa, without including
<pybind11/stl.h> should result in a helpful suggestion in the error message"""
import pybind11_cross_module_tests as cm
expected_message = ("Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
"<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
"conversions are optional and require extra headers to be included\n"
"when compiling your pybind11 module.")
with pytest.raises(TypeError) as excinfo:
cm.missing_header_arg([1.0, 2.0, 3.0])
assert expected_message in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
cm.missing_header_return()
assert expected_message in str(excinfo.value)
def test_function_with_string_and_vector_string_arg():
"""Check if a string is NOT implicitly converted to a list, which was the
behavior before fix of issue #1258"""
assert m.func_with_string_or_vector_string_arg_overload(('A', 'B', )) == 2
assert m.func_with_string_or_vector_string_arg_overload(['A', 'B']) == 2
assert m.func_with_string_or_vector_string_arg_overload('A') == 3
def test_stl_ownership():
cstats = ConstructorStats.get(m.Placeholder)
assert cstats.alive() == 0
r = m.test_stl_ownership()
assert len(r) == 1
del r
assert cstats.alive() == 0
def test_array_cast_sequence():
assert m.array_cast_sequence((1, 2, 3)) == [1, 2, 3]
def test_issue_1561():
""" check fix for issue #1561 """
bar = m.Issue1561Outer()
bar.list = [m.Issue1561Inner('bar')]
bar.list  # re-access the property; should not crash (regression check for #1561)
assert bar.list[0].data == 'bar'
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for training models.
See the @{$python/train} guide.
@@Optimizer
@@GradientDescentOptimizer
@@AdadeltaOptimizer
@@AdagradOptimizer
@@AdagradDAOptimizer
@@MomentumOptimizer
@@AdamOptimizer
@@FtrlOptimizer
@@ProximalGradientDescentOptimizer
@@ProximalAdagradOptimizer
@@RMSPropOptimizer
@@gradients
@@AggregationMethod
@@stop_gradient
@@hessians
@@clip_by_value
@@clip_by_norm
@@clip_by_average_norm
@@clip_by_global_norm
@@global_norm
@@cosine_decay
@@linear_cosine_decay
@@noisy_linear_cosine_decay
@@exponential_decay
@@inverse_time_decay
@@natural_exp_decay
@@piecewise_constant
@@polynomial_decay
@@ExponentialMovingAverage
@@Coordinator
@@QueueRunner
@@LooperThread
@@add_queue_runner
@@start_queue_runners
@@Server
@@Supervisor
@@SessionManager
@@ClusterSpec
@@replica_device_setter
@@MonitoredTrainingSession
@@MonitoredSession
@@SingularMonitoredSession
@@Scaffold
@@SessionCreator
@@ChiefSessionCreator
@@WorkerSessionCreator
@@summary_iterator
@@SessionRunHook
@@SessionRunArgs
@@SessionRunContext
@@SessionRunValues
@@LoggingTensorHook
@@StopAtStepHook
@@CheckpointSaverHook
@@CheckpointSaverListener
@@NewCheckpointReader
@@StepCounterHook
@@NanLossDuringTrainingError
@@NanTensorHook
@@SummarySaverHook
@@GlobalStepWaiterHook
@@FinalOpsHook