diff --git "a/codeparrot-valid_1021.txt" "b/codeparrot-valid_1021.txt"
new file mode 100644
--- /dev/null
+++ "b/codeparrot-valid_1021.txt"
@@ -0,0 +1,10000 @@
+ kind=_KEYWORD_ONLY,
+ default=default))
+ # **kwargs
+ if func_code.co_flags & 0x08:
+ index = pos_count + keyword_only_count
+ if func_code.co_flags & 0x04:
+ index += 1
+
+ name = arg_names[index]
+ annotation = annotations.get(name, _empty)
+ parameters.append(Parameter(name, annotation=annotation,
+ kind=_VAR_KEYWORD))
+
+ return cls(parameters,
+ return_annotation=annotations.get('return', _empty),
+ __validate_parameters__=False)
+
+ @property
+ def parameters(self):
+ return self._parameters
+
+ @property
+ def return_annotation(self):
+ return self._return_annotation
+
+ def replace(self, *, parameters=_void, return_annotation=_void):
+ '''Creates a customized copy of the Signature.
+ Pass 'parameters' and/or 'return_annotation' arguments
+ to override them in the new copy.
+ '''
+
+ if parameters is _void:
+ parameters = self.parameters.values()
+
+ if return_annotation is _void:
+ return_annotation = self._return_annotation
+
+ return type(self)(parameters,
+ return_annotation=return_annotation)
+
+ def __eq__(self, other):
+ if (not issubclass(type(other), Signature) or
+ self.return_annotation != other.return_annotation or
+ len(self.parameters) != len(other.parameters)):
+ return False
+
+ other_positions = {param: idx
+ for idx, param in enumerate(other.parameters.keys())}
+
+ for idx, (param_name, param) in enumerate(self.parameters.items()):
+ if param.kind == _KEYWORD_ONLY:
+ try:
+ other_param = other.parameters[param_name]
+ except KeyError:
+ return False
+ else:
+ if param != other_param:
+ return False
+ else:
+ try:
+ other_idx = other_positions[param_name]
+ except KeyError:
+ return False
+ else:
+ if (idx != other_idx or
+ param != other.parameters[param_name]):
+ return False
+
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _bind(self, args, kwargs, *, partial=False):
+ '''Private method. Don't use directly.'''
+
+ arguments = OrderedDict()
+
+ parameters = iter(self.parameters.values())
+ parameters_ex = ()
+ arg_vals = iter(args)
+
+ if partial:
+ # Support for binding arguments to 'functools.partial' objects.
+ # See 'functools.partial' case in 'signature()' implementation
+ # for details.
+ for param_name, param in self.parameters.items():
+ if (param._partial_kwarg and param_name not in kwargs):
+ # Simulating 'functools.partial' behavior
+ kwargs[param_name] = param.default
+
+ while True:
+ # Let's iterate through the positional arguments and corresponding
+ # parameters
+ try:
+ arg_val = next(arg_vals)
+ except StopIteration:
+ # No more positional arguments
+ try:
+ param = next(parameters)
+ except StopIteration:
+ # No more parameters. That's it. Just need to check that
+ # we have no `kwargs` after this while loop
+ break
+ else:
+ if param.kind == _VAR_POSITIONAL:
+ # That's OK, just empty *args. Let's start parsing
+ # kwargs
+ break
+ elif param.name in kwargs:
+ if param.kind == _POSITIONAL_ONLY:
+ msg = '{arg!r} parameter is positional only, ' \
+ 'but was passed as a keyword'
+ msg = msg.format(arg=param.name)
+ raise TypeError(msg) from None
+ parameters_ex = (param,)
+ break
+ elif (param.kind == _VAR_KEYWORD or
+ param.default is not _empty):
+ # That's fine too - we have a default value for this
+ # parameter. So, let's start parsing `kwargs`, starting
+ # with the current parameter
+ parameters_ex = (param,)
+ break
+ else:
+ if partial:
+ parameters_ex = (param,)
+ break
+ else:
+ msg = '{arg!r} parameter lacking default value'
+ msg = msg.format(arg=param.name)
+ raise TypeError(msg) from None
+ else:
+ # We have a positional argument to process
+ try:
+ param = next(parameters)
+ except StopIteration:
+ raise TypeError('too many positional arguments') from None
+ else:
+ if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
+ # Looks like we have no parameter for this positional
+ # argument
+ raise TypeError('too many positional arguments')
+
+ if param.kind == _VAR_POSITIONAL:
+ # We have an '*args'-like argument, let's fill it with
+ # all positional arguments we have left and move on to
+ # the next phase
+ values = [arg_val]
+ values.extend(arg_vals)
+ arguments[param.name] = tuple(values)
+ break
+
+ if param.name in kwargs:
+ raise TypeError('multiple values for argument '
+ '{arg!r}'.format(arg=param.name))
+
+ arguments[param.name] = arg_val
+
+ # Now, we iterate through the remaining parameters to process
+ # keyword arguments
+ kwargs_param = None
+ for param in itertools.chain(parameters_ex, parameters):
+ if param.kind == _POSITIONAL_ONLY:
+ # This should never happen in case of a properly built
+ # Signature object (but let's have this check here
+ # to ensure correct behaviour just in case)
+ raise TypeError('{arg!r} parameter is positional only, '
+ 'but was passed as a keyword'. \
+ format(arg=param.name))
+
+ if param.kind == _VAR_KEYWORD:
+ # Memorize that we have a '**kwargs'-like parameter
+ kwargs_param = param
+ continue
+
+ param_name = param.name
+ try:
+ arg_val = kwargs.pop(param_name)
+ except KeyError:
+ # We have no value for this parameter. It's fine though,
+ # if it has a default value, or it is an '*args'-like
+ # parameter, left alone by the processing of positional
+ # arguments.
+ if (not partial and param.kind != _VAR_POSITIONAL and
+ param.default is _empty):
+ raise TypeError('{arg!r} parameter lacking default value'. \
+ format(arg=param_name)) from None
+
+ else:
+ arguments[param_name] = arg_val
+
+ if kwargs:
+ if kwargs_param is not None:
+ # Process our '**kwargs'-like parameter
+ arguments[kwargs_param.name] = kwargs
+ else:
+ raise TypeError('too many keyword arguments')
+
+ return self._bound_arguments_cls(self, arguments)
+
+ def bind(__bind_self, *args, **kwargs):
+ '''Get a BoundArguments object that maps the passed `args`
+ and `kwargs` to the function's signature. Raises `TypeError`
+ if the passed arguments cannot be bound.
+ '''
+ return __bind_self._bind(args, kwargs)
+
+ def bind_partial(__bind_self, *args, **kwargs):
+ '''Get a BoundArguments object that partially maps the
+ passed `args` and `kwargs` to the function's signature.
+ Raises `TypeError` if the passed arguments cannot be bound.
+ '''
+ return __bind_self._bind(args, kwargs, partial=True)
+
+ def __str__(self):
+ result = []
+ render_kw_only_separator = True
+ for idx, param in enumerate(self.parameters.values()):
+ formatted = str(param)
+
+ kind = param.kind
+ if kind == _VAR_POSITIONAL:
+ # OK, we have an '*args'-like parameter, so we won't need
+ # a '*' to separate keyword-only arguments
+ render_kw_only_separator = False
+ elif kind == _KEYWORD_ONLY and render_kw_only_separator:
+ # We have a keyword-only parameter to render and we haven't
+ # rendered an '*args'-like parameter before, so add a '*'
+ # separator to the parameters list ("foo(arg1, *, arg2)" case)
+ result.append('*')
+ # This condition should be only triggered once, so
+ # reset the flag
+ render_kw_only_separator = False
+
+ result.append(formatted)
+
+ rendered = '({})'.format(', '.join(result))
+
+ if self.return_annotation is not _empty:
+ anno = formatannotation(self.return_annotation)
+ rendered += ' -> {}'.format(anno)
+
+ return rendered
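+
+# A minimal usage sketch (assuming the rest of this funcsigs-style module,
+# including the Parameter class and a module-level signature() helper, is
+# available):
+#
+#   sig = signature(my_func)          # or Signature.from_function(my_func)
+#   str(sig)                          # e.g. "(a, b=1, *args, **kwargs)"
+#   ba = sig.bind(1, b=2)             # BoundArguments; TypeError if unbindable
+#   partial_ba = sig.bind_partial(1)  # tolerates missing required arguments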
+
+#!/usr/bin/env python
+# coding=utf-8
+from pybrain.tools.shortcuts import buildNetwork
+from pybrain.supervised.trainers import BackpropTrainer
+from src.supervised_learning import dataset
+from math import sqrt
+
+import cPickle as pickle
+
+
+def build(input_size, hidden_size, target_size):
+ return buildNetwork(input_size, hidden_size, target_size, bias=True)
+
+def train(network, dataset, epochs):
+ trainer = BackpropTrainer(network, dataset)
+ # trainer.trainUntilConvergence(verbose=True)
+ #
+ for i in range(epochs):
+ mse = trainer.train()
+ rmse = sqrt(mse)
+ print "training RMSE, epoch {}: {}".format(i + 1, rmse)
+
+def load_from_file(filename):
+ network = None
+ with open(filename, 'rb') as pickle_file: # binary mode, matching save_to_file's 'wb'
+ network = pickle.load(pickle_file)
+ return network
+
+def save_to_file(filename, network):
+ pickle.dump(network, open(filename, 'wb'))
+
+def train_and_save(input_size,
+ output_size,
+ hidden_size,
+ training_epochs,
+ network_filename,
+ dataset_filename):
+
+ network = build(input_size, hidden_size, output_size)
+ ds = dataset.load_from_file(dataset_filename)
+ train(network, ds, training_epochs)
+ save_to_file(network_filename, network)
+
+def rnd_config():
+ return {
+ "network_filename": "network/rnd_net.pickle",
+ "dataset_filename": "datasets/rnd.data",
+ }
+
+def best_avg_config():
+ return {
+ "network_filename": "network/best_avg_net.pickle",
+ "dataset_filename": "datasets/best_avg.data",
+ }
+
+def thinking_config():
+ return {
+ "network_filename": "network/thinking_net.pickle",
+ "dataset_filename": "datasets/thinking.data",
+ }
+
+def mixed_config():
+ return {
+ "network_filename": "network/mixed_net.pickle",
+ "dataset_filename": "datasets/mixed.data",
+ }
+
+if __name__ == '__main__':
+ input_size = 9
+ output_size = 1
+ hidden_size = 15
+ training_epochs = 200
+ train_and_save(
+ input_size,
+ output_size,
+ hidden_size,
+ training_epochs,
+ **mixed_config())
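+
+# A minimal inference sketch (hypothetical path; pybrain networks expose
+# activate() for a single input vector):
+#
+# net = load_from_file("network/mixed_net.pickle")
+# prediction = net.activate([0.0] * 9) # 9 features, matching input_size above
+# print "prediction: {}".format(prediction)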
+
+
+# -*- coding: utf-8 -*-
+
+import sys
+import re
+import zlib
+import urllib2
+
+import xml.etree.ElementTree as et
+default_encoding = 'utf-8'
+if sys.getdefaultencoding() != default_encoding:
+ reload(sys)
+ sys.setdefaultencoding(default_encoding)
+
+class Feedback():
+ """Feeback used by Alfred Script Filter
+
+ Usage:
+ fb = Feedback()
+ fb.add_item('Hello', 'World')
+ fb.add_item('Foo', 'Bar')
+ print fb
+
+ """
+
+ def __init__(self):
+ self.feedback = et.Element('items')
+
+ def __repr__(self):
+ """XML representation used by Alfred
+
+ Returns:
+ XML string
+ """
+ return et.tostring(self.feedback)
+
+ def add_item(self, title, subtitle="", arg="", valid="yes", autocomplete="", icon="icon.png"):
+ """
+ Add an item to the Alfred feedback
+
+ Args:
+ title(str): the title displayed by Alfred
+ Keyword Args:
+ subtitle(str): the subtitle displayed by Alfred
+ arg(str): the value returned by Alfred when the item is selected
+ valid(str): whether or not the entry can be selected in Alfred to trigger an action
+ autocomplete(str): the text to be inserted if an invalid item is selected. This is only used if 'valid' is 'no'
+ icon(str): filename of the icon that Alfred will display
+ """
+ item = et.SubElement(self.feedback, 'item', uid=str(len(self.feedback)), arg=arg, valid=valid, autocomplete=autocomplete)
+ _title = et.SubElement(item, 'title')
+ _title.text = title
+ _sub = et.SubElement(item, 'subtitle')
+ _sub.text = subtitle
+ _icon = et.SubElement(item, 'icon')
+ _icon.text = icon
+
+query = '{query}'
+url = "http://www.bilibili.com/search?keyword=%s&orderby=&formsubmit="%query
+req = urllib2.Request(url=url)
+content = urllib2.urlopen(req, timeout=10).read()
+content = zlib.decompress(content, 16+zlib.MAX_WBITS)
+
+# NOTE: the HTML markup inside this pattern was garbled during extraction;
+# this is a best-effort reconstruction that preserves the three capture
+# groups the loop below expects (av number, video type, title).
+reg = r'(av\d+)[^>]*>([^<]*)<[^>]*>([^<]*)'
+result = re.findall(reg,content,re.S)
+fb = Feedback()
+
+try:
+ for item in result:
+ avnum = item[0]
+ avtype = item[1]
+ title = item[2].strip()
+ fb.add_item(title,subtitle="%s : http://www.bilibili.tv/video/%s"%(avtype,avnum),arg=avnum)
+
+except SyntaxError as e:
+ if 'EOF' in e.msg or 'EOL' in e.msg:
+ fb.add_item('...')
+ else:
+ fb.add_item('SyntaxError', e.msg)
+except Exception as e:
+ fb.add_item(e.__class__.__name__,subtitle=e.message)
+print fb
+
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Building Blocks of TensorFlow Debugger Command-Line Interface."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import os
+import re
+import sre_constants
+import traceback
+
+import numpy as np
+import six
+
+from tensorflow.python.client import pywrap_tf_session
+from tensorflow.python.platform import gfile
+
+HELP_INDENT = " "
+
+EXPLICIT_USER_EXIT = "explicit_user_exit"
+REGEX_MATCH_LINES_KEY = "regex_match_lines"
+INIT_SCROLL_POS_KEY = "init_scroll_pos"
+
+MAIN_MENU_KEY = "mm:"
+
+
+class CommandLineExit(Exception):
+
+ def __init__(self, exit_token=None):
+ Exception.__init__(self)
+ self._exit_token = exit_token
+
+ @property
+ def exit_token(self):
+ return self._exit_token
+
+
+class RichLine(object):
+ """Rich single-line text.
+
+ Attributes:
+ text: A plain string, the raw text represented by this object. Should not
+ contain newlines.
+ font_attr_segs: A list of (start, end, font attribute) triples, representing
+ richness information applied to substrings of text.
+ """
+
+ def __init__(self, text="", font_attr=None):
+ """Construct a RichLine with no rich attributes or a single attribute.
+
+ Args:
+ text: Raw text string
+ font_attr: If specified, a single font attribute to be applied to the
+ entire text. Extending this object via concatenation allows creation
+ of text with varying attributes.
+ """
+ # TODO(ebreck) Make .text and .font_attr protected members when we no
+ # longer need public access.
+ self.text = text
+ if font_attr:
+ self.font_attr_segs = [(0, len(text), font_attr)]
+ else:
+ self.font_attr_segs = []
+
+ def __add__(self, other):
+ """Concatenate two chunks of maybe rich text to make a longer rich line.
+
+ Does not modify self.
+
+ Args:
+ other: Another piece of text to concatenate with this one.
+ If it is a plain str, it will be appended to this string with no
+ attributes. If it is a RichLine, it will be appended to this string
+ with its attributes preserved.
+
+ Returns:
+ A new RichLine comprising both chunks of text, with appropriate
+ attributes applied to the corresponding substrings.
+ """
+ ret = RichLine()
+ if isinstance(other, six.string_types):
+ ret.text = self.text + other
+ ret.font_attr_segs = self.font_attr_segs[:]
+ return ret
+ elif isinstance(other, RichLine):
+ ret.text = self.text + other.text
+ ret.font_attr_segs = self.font_attr_segs[:]
+ old_len = len(self.text)
+ for start, end, font_attr in other.font_attr_segs:
+ ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))
+ return ret
+ else:
+ raise TypeError("%r cannot be concatenated with a RichLine" % other)
+
+ def __len__(self):
+ return len(self.text)
+
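+# Example of concatenation (a sketch; "bold" stands in for whatever font
+# attribute the renderer understands):
+#
+#   rl = RichLine("Hello", font_attr="bold") + " world"
+#   rl.text            # "Hello world"
+#   rl.font_attr_segs  # [(0, 5, "bold")]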
+
+def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):
+ """Convert a list of RichLine objects or strings to a RichTextLines object.
+
+ Args:
+ rich_text_list: a list of RichLine objects or strings
+ annotations: annotations for the resultant RichTextLines object.
+
+ Returns:
+ A corresponding RichTextLines object.
+ """
+ lines = []
+ font_attr_segs = {}
+ for i, rl in enumerate(rich_text_list):
+ if isinstance(rl, RichLine):
+ lines.append(rl.text)
+ if rl.font_attr_segs:
+ font_attr_segs[i] = rl.font_attr_segs
+ else:
+ lines.append(rl)
+ return RichTextLines(lines, font_attr_segs, annotations=annotations)
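+
+# e.g. (sketch): rich_text_lines_from_rich_line_list(
+#     [RichLine("title", "bold"), "plain line"])
+# yields a two-line RichTextLines with font_attr_segs == {0: [(0, 5, "bold")]}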
+
+
+def get_tensorflow_version_lines(include_dependency_versions=False):
+ """Generate RichTextLines with TensorFlow version info.
+
+ Args:
+ include_dependency_versions: Include the version of TensorFlow's key
+ dependencies, such as numpy.
+
+ Returns:
+ A formatted, multi-line `RichTextLines` object.
+ """
+ lines = ["TensorFlow version: %s" % pywrap_tf_session.__version__]
+ lines.append("")
+ if include_dependency_versions:
+ lines.append("Dependency version(s):")
+ lines.append(" numpy: %s" % np.__version__)
+ lines.append("")
+ return RichTextLines(lines)
+
+
+class RichTextLines(object):
+ """Rich multi-line text.
+
+ Line-by-line text output, with font attributes (e.g., color) and annotations
+ (e.g., indices in a multi-dimensional tensor). Used as the text output of CLI
+ commands. Can be rendered on terminal environments such as curses.
+
+ This is not to be confused with Rich Text Format (RTF). This class is for text
+ lines only.
+ """
+
+ def __init__(self, lines, font_attr_segs=None, annotations=None):
+ """Constructor of RichTextLines.
+
+ Args:
+ lines: A list of str or a single str, representing text output to
+ screen. The latter case is for convenience when the text output is
+ single-line.
+ font_attr_segs: A map from 0-based row index to a list of 3-tuples.
+ It lists segments in each row that have special font attributes, such
+ as colors, that are not the default attribute. For example:
+ {1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]}
+
+ In each tuple, the 1st element is the start index of the segment. The
+ 2nd element is the end index, in an "open interval" fashion. The 3rd
+ element is an object or a list of objects that represents the font
+ attribute. Colors are represented as strings as in the examples above.
+ annotations: A map from 0-based row index to any object for annotating
+ the row. A typical use example is annotating rows of the output as
+ indices in a multi-dimensional tensor. For example, consider the
+ following text representation of a 3x2x2 tensor:
+ [[[0, 0], [0, 0]],
+ [[0, 0], [0, 0]],
+ [[0, 0], [0, 0]]]
+ The annotation can indicate the indices of the first element shown in
+ each row, i.e.,
+ {0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]}
+ This information can make display of tensors on screen clearer and can
+ help the user navigate (scroll) to the desired location in a large
+ tensor.
+
+ Raises:
+ ValueError: If lines is of invalid type.
+ """
+ if isinstance(lines, list):
+ self._lines = lines
+ elif isinstance(lines, six.string_types):
+ self._lines = [lines]
+ else:
+ raise ValueError("Unexpected type in lines: %s" % type(lines))
+
+ self._font_attr_segs = font_attr_segs
+ if not self._font_attr_segs:
+ self._font_attr_segs = {}
+ # TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
+
+ self._annotations = annotations
+ if not self._annotations:
+ self._annotations = {}
+ # TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
+
+ @property
+ def lines(self):
+ return self._lines
+
+ @property
+ def font_attr_segs(self):
+ return self._font_attr_segs
+
+ @property
+ def annotations(self):
+ return self._annotations
+
+ def num_lines(self):
+ return len(self._lines)
+
+ def slice(self, begin, end):
+ """Slice a RichTextLines object.
+
+ The object itself is not changed. A sliced instance is returned.
+
+ Args:
+ begin: (int) Beginning line index (inclusive). Must be >= 0.
+ end: (int) Ending line index (exclusive). Must be >= 0.
+
+ Returns:
+ (RichTextLines) Sliced output instance of RichTextLines.
+
+ Raises:
+ ValueError: If begin or end is negative.
+ """
+
+ if begin < 0 or end < 0:
+ raise ValueError("Encountered negative index.")
+
+ # Copy lines.
+ lines = self.lines[begin:end]
+
+ # Slice font attribute segments.
+ font_attr_segs = {}
+ for key in self.font_attr_segs:
+ if key >= begin and key < end:
+ font_attr_segs[key - begin] = self.font_attr_segs[key]
+
+ # Slice annotations.
+ annotations = {}
+ for key in self.annotations:
+ if not isinstance(key, int):
+ # Annotations can contain keys that are not line numbers.
+ annotations[key] = self.annotations[key]
+ elif key >= begin and key < end:
+ annotations[key - begin] = self.annotations[key]
+
+ return RichTextLines(
+ lines, font_attr_segs=font_attr_segs, annotations=annotations)
+
+ def extend(self, other):
+ """Extend this instance of RichTextLines with another instance.
+
+ The extension takes effect on the text lines, the font attribute segments,
+ as well as the annotations. The line indices in the font attribute
+ segments and the annotations are adjusted to account for the existing
+ lines. If there are duplicate, non-line-index fields in the annotations,
+ the value from the input argument "other" will override that in this
+ instance.
+
+ Args:
+ other: (RichTextLines) The other RichTextLines instance to be appended at
+ the end of this instance.
+ """
+
+ orig_num_lines = self.num_lines() # Record original number of lines.
+
+ # Merge the lines.
+ self._lines.extend(other.lines)
+
+ # Merge the font_attr_segs.
+ for line_index in other.font_attr_segs:
+ self._font_attr_segs[orig_num_lines + line_index] = (
+ other.font_attr_segs[line_index])
+
+ # Merge the annotations.
+ for key in other.annotations:
+ if isinstance(key, int):
+ self._annotations[orig_num_lines + key] = (other.annotations[key])
+ else:
+ self._annotations[key] = other.annotations[key]
+
+ def _extend_before(self, other):
+ """Add another RichTextLines object to the front.
+
+ Args:
+ other: (RichTextLines) The other object to add to the front to this
+ object.
+ """
+
+ other_num_lines = other.num_lines() # Record original number of lines.
+
+ # Merge the lines.
+ self._lines = other.lines + self._lines
+
+ # Merge the font_attr_segs.
+ new_font_attr_segs = {}
+ for line_index in self.font_attr_segs:
+ new_font_attr_segs[other_num_lines + line_index] = (
+ self.font_attr_segs[line_index])
+ new_font_attr_segs.update(other.font_attr_segs)
+ self._font_attr_segs = new_font_attr_segs
+
+ # Merge the annotations.
+ new_annotations = {}
+ for key in self._annotations:
+ if isinstance(key, int):
+ new_annotations[other_num_lines + key] = (self.annotations[key])
+ else:
+ new_annotations[key] = other.annotations[key]
+
+ new_annotations.update(other.annotations)
+ self._annotations = new_annotations
+
+ def append(self, line, font_attr_segs=None):
+ """Append a single line of text.
+
+ Args:
+ line: (str) The text to be added to the end.
+ font_attr_segs: (list of tuples) Font attribute segments of the appended
+ line.
+ """
+
+ self._lines.append(line)
+ if font_attr_segs:
+ self._font_attr_segs[len(self._lines) - 1] = font_attr_segs
+
+ def append_rich_line(self, rich_line):
+ self.append(rich_line.text, rich_line.font_attr_segs)
+
+ def prepend(self, line, font_attr_segs=None):
+ """Prepend (i.e., add to the front) a single line of text.
+
+ Args:
+ line: (str) The text to be added to the front.
+ font_attr_segs: (list of tuples) Font attribute segments of the appended
+ line.
+ """
+
+ other = RichTextLines(line)
+ if font_attr_segs:
+ other.font_attr_segs[0] = font_attr_segs
+ self._extend_before(other)
+
+ def write_to_file(self, file_path):
+ """Write the object itself to file, in a plain format.
+
+ The font_attr_segs and annotations are ignored.
+
+ Args:
+ file_path: (str) path of the file to write to.
+ """
+
+ with gfile.Open(file_path, "w") as f:
+ for line in self._lines:
+ f.write(line + "\n")
+
+ # TODO(cais): Add a method to allow appending to a line in RichTextLines with
+ # both text and font_attr_segs.
+
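+# Usage sketch (attribute names are illustrative; any object the renderer
+# understands may be used as a font attribute):
+#
+#   out = RichTextLines(["first line"], font_attr_segs={0: [(0, 5, "bold")]})
+#   out.append("second line", font_attr_segs=[(0, 6, "red")])
+#   out.extend(RichTextLines("third line"))
+#   out.num_lines()  # 3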
+
+def regex_find(orig_screen_output, regex, font_attr):
+ """Perform regex match in rich text lines.
+
+ Produces a new RichTextLines object with font_attr_segs containing highlighted
+ regex matches.
+
+ Example use cases include:
+ 1) search for specific items in a large list of items, and
+ 2) search for specific numerical values in a large tensor.
+
+ Args:
+ orig_screen_output: The original RichTextLines, in which the regex find
+ is to be performed.
+ regex: The regex used for matching.
+ font_attr: Font attribute used for highlighting the found result.
+
+ Returns:
+ A modified copy of orig_screen_output.
+
+ Raises:
+ ValueError: If input str regex is not a valid regular expression.
+ """
+ new_screen_output = RichTextLines(
+ orig_screen_output.lines,
+ font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs),
+ annotations=orig_screen_output.annotations)
+
+ try:
+ re_prog = re.compile(regex)
+ except sre_constants.error:
+ raise ValueError("Invalid regular expression: \"%s\"" % regex)
+
+ regex_match_lines = []
+ for i, line in enumerate(new_screen_output.lines):
+ find_it = re_prog.finditer(line)
+
+ match_segs = []
+ for match in find_it:
+ match_segs.append((match.start(), match.end(), font_attr))
+
+ if match_segs:
+ if i not in new_screen_output.font_attr_segs:
+ new_screen_output.font_attr_segs[i] = match_segs
+ else:
+ new_screen_output.font_attr_segs[i].extend(match_segs)
+ new_screen_output.font_attr_segs[i] = sorted(
+ new_screen_output.font_attr_segs[i], key=lambda x: x[0])
+ regex_match_lines.append(i)
+
+ new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines
+ return new_screen_output
+
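+# Example (sketch; "yellow" is an illustrative font attribute):
+#
+#   highlighted = regex_find(screen_output, r"[0-9]+", "yellow")
+#   highlighted.annotations[REGEX_MATCH_LINES_KEY]  # indices of matched lines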
+
+def wrap_rich_text_lines(inp, cols):
+ """Wrap RichTextLines according to maximum number of columns.
+
+ Produces a new RichTextLines object with the text lines, font_attr_segs and
+ annotations properly wrapped. This ought to be used sparingly, as in most
+ cases, command handlers producing RichTextLines outputs should know the
+ screen/panel width via the screen_info kwarg and should produce properly
+ length-limited lines in the output accordingly.
+
+ Args:
+ inp: Input RichTextLines object.
+ cols: Number of columns, as an int.
+
+ Returns:
+ 1) A new instance of RichTextLines, with line lengths limited to cols.
+ 2) A list of new (wrapped) line indices. For example, if the original input
+ consists of three lines and only the second line is wrapped, and it's
+ wrapped into two lines, this return value will be: [0, 1, 3].
+ Raises:
+ ValueError: If inputs have invalid types.
+ """
+
+ new_line_indices = []
+
+ if not isinstance(inp, RichTextLines):
+ raise ValueError("Invalid type of input screen_output")
+
+ if not isinstance(cols, int):
+ raise ValueError("Invalid type of input cols")
+
+ out = RichTextLines([])
+
+ row_counter = 0 # Counter for new row index
+ for i, line in enumerate(inp.lines):
+ new_line_indices.append(out.num_lines())
+
+ if i in inp.annotations:
+ out.annotations[row_counter] = inp.annotations[i]
+
+ if len(line) <= cols:
+ # No wrapping.
+ out.lines.append(line)
+ if i in inp.font_attr_segs:
+ out.font_attr_segs[row_counter] = inp.font_attr_segs[i]
+
+ row_counter += 1
+ else:
+ # Wrap.
+ wlines = [] # Wrapped lines.
+
+ osegs = []
+ if i in inp.font_attr_segs:
+ osegs = inp.font_attr_segs[i]
+
+ idx = 0
+ while idx < len(line):
+ if idx + cols > len(line):
+ rlim = len(line)
+ else:
+ rlim = idx + cols
+
+ wlines.append(line[idx:rlim])
+ for seg in osegs:
+ if (seg[0] < rlim) and (seg[1] >= idx):
+ # Calculate left bound within wrapped line.
+ if seg[0] >= idx:
+ lb = seg[0] - idx
+ else:
+ lb = 0
+
+ # Calculate right bound within wrapped line.
+ if seg[1] < rlim:
+ rb = seg[1] - idx
+ else:
+ rb = rlim - idx
+
+ if rb > lb: # Omit zero-length segments.
+ wseg = (lb, rb, seg[2])
+ if row_counter not in out.font_attr_segs:
+ out.font_attr_segs[row_counter] = [wseg]
+ else:
+ out.font_attr_segs[row_counter].append(wseg)
+
+ idx += cols
+ row_counter += 1
+
+ out.lines.extend(wlines)
+
+ # Copy over keys of annotation that are not row indices.
+ for key in inp.annotations:
+ if not isinstance(key, int):
+ out.annotations[key] = inp.annotations[key]
+
+ return out, new_line_indices
+
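+# Example (sketch): wrapping a 14-character line to 10 columns.
+#
+#   out, new_indices = wrap_rich_text_lines(
+#       RichTextLines(["0123456789abcd"]), 10)
+#   out.lines    # ["0123456789", "abcd"]
+#   new_indices  # [0]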
+
+class CommandHandlerRegistry(object):
+ """Registry of command handlers for CLI.
+
+ Handler methods (callables) for user commands can be registered with this
+ class, which then is able to dispatch commands to the correct handlers and
+ retrieve the RichTextLines output.
+
+ For example, suppose you have the following handler defined:
+ def echo(argv, screen_info=None):
+ return RichTextLines(["arguments = %s" % " ".join(argv),
+ "screen_info = " + repr(screen_info)])
+
+ you can register the handler with the command prefix "echo" and alias "e":
+ registry = CommandHandlerRegistry()
+ registry.register_command_handler("echo", echo,
+ "Echo arguments, along with screen info", prefix_aliases=["e"])
+
+ then to invoke this command handler with some arguments and screen_info, do:
+ registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80})
+
+ or with the prefix alias:
+ registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80})
+
+ The call will return a RichTextLines object which can be rendered by a CLI.
+ """
+
+ HELP_COMMAND = "help"
+ HELP_COMMAND_ALIASES = ["h"]
+ VERSION_COMMAND = "version"
+ VERSION_COMMAND_ALIASES = ["ver"]
+
+ def __init__(self):
+ # A dictionary from command prefix to handler.
+ self._handlers = {}
+
+ # A dictionary from prefix alias to prefix.
+ self._alias_to_prefix = {}
+
+ # A dictionary from prefix to aliases.
+ self._prefix_to_aliases = {}
+
+ # A dictionary from command prefix to help string.
+ self._prefix_to_help = {}
+
+ # Introductory text to help information.
+ self._help_intro = None
+
+ # Register a default handler for the command "help".
+ self.register_command_handler(
+ self.HELP_COMMAND,
+ self._help_handler,
+ "Print this help message.",
+ prefix_aliases=self.HELP_COMMAND_ALIASES)
+
+ # Register a default handler for the command "version".
+ self.register_command_handler(
+ self.VERSION_COMMAND,
+ self._version_handler,
+ "Print the versions of TensorFlow and its key dependencies.",
+ prefix_aliases=self.VERSION_COMMAND_ALIASES)
+
+ def register_command_handler(self,
+ prefix,
+ handler,
+ help_info,
+ prefix_aliases=None):
+ """Register a callable as a command handler.
+
+ Args:
+ prefix: Command prefix, i.e., the first word in a command, e.g.,
+ "print" as in "print tensor_1".
+ handler: A callable of the following signature:
+ foo_handler(argv, screen_info=None),
+ where argv is the argument vector (excluding the command prefix) and
+ screen_info is a dictionary containing information about the screen,
+ such as number of columns, e.g., {"cols": 100}.
+ The callable should return:
+ 1) a RichTextLines object representing the screen output.
+
+ The callable can also raise an exception of the type CommandLineExit,
+ which if caught by the command-line interface, will lead to its exit.
+ The exception can optionally carry an exit token of arbitrary type.
+ help_info: A help string.
+ prefix_aliases: Aliases for the command prefix, as a list of str. E.g.,
+ shorthands for the command prefix: ["p", "pr"]
+
+ Raises:
+ ValueError: If
+ 1) the prefix is empty, or
+ 2) handler is not callable, or
+ 3) a handler is already registered for the prefix, or
+ 4) elements in prefix_aliases clash with existing aliases.
+ 5) help_info is not a str.
+ """
+
+ if not prefix:
+ raise ValueError("Empty command prefix")
+
+ if prefix in self._handlers:
+ raise ValueError(
+ "A handler is already registered for command prefix \"%s\"" % prefix)
+
+ # Make sure handler is callable.
+ if not callable(handler):
+ raise ValueError("handler is not callable")
+
+ # Make sure that help info is a string.
+ if not isinstance(help_info, six.string_types):
+ raise ValueError("help_info is not a str")
+
+ # Process prefix aliases.
+ if prefix_aliases:
+ for alias in prefix_aliases:
+ if self._resolve_prefix(alias):
+ raise ValueError(
+ "The prefix alias \"%s\" clashes with existing prefixes or "
+ "aliases." % alias)
+ self._alias_to_prefix[alias] = prefix
+
+ self._prefix_to_aliases[prefix] = prefix_aliases
+
+ # Store handler.
+ self._handlers[prefix] = handler
+
+ # Store help info.
+ self._prefix_to_help[prefix] = help_info
+
+ def dispatch_command(self, prefix, argv, screen_info=None):
+ """Handles a command by dispatching it to a registered command handler.
+
+ Args:
+ prefix: Command prefix, as a str, e.g., "print".
+ argv: Command argument vector, excluding the command prefix, represented
+ as a list of str, e.g.,
+ ["tensor_1"]
+ screen_info: A dictionary containing screen info, e.g., {"cols": 100}.
+
+ Returns:
+ An instance of RichTextLines or None. If any exception is caught during
+ the invocation of the command handler, the RichTextLines will wrap the
+ error type and message.
+
+ Raises:
+ ValueError: If
+ 1) prefix is empty, or
+ 2) no command handler is registered for the command prefix, or
+ 3) the handler is found for the prefix, but it fails to return a
+ RichTextLines or raise any exception.
+ CommandLineExit:
+ If the command handler raises this type of exception, this method will
+ simply pass it along.
+ """
+ if not prefix:
+ raise ValueError("Prefix is empty")
+
+ resolved_prefix = self._resolve_prefix(prefix)
+ if not resolved_prefix:
+ raise ValueError("No handler is registered for command prefix \"%s\"" %
+ prefix)
+
+ handler = self._handlers[resolved_prefix]
+ try:
+ output = handler(argv, screen_info=screen_info)
+ except CommandLineExit as e:
+ raise e
+ except SystemExit as e:
+ # Special case for syntax errors caught by argparse.
+ lines = ["Syntax error for command: %s" % prefix,
+ "For help, do \"help %s\"" % prefix]
+ output = RichTextLines(lines)
+
+ except BaseException as e: # pylint: disable=broad-except
+ lines = ["Error occurred during handling of command: %s %s:" %
+ (resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))]
+
+ # Include traceback of the exception.
+ lines.append("")
+ lines.extend(traceback.format_exc().split("\n"))
+
+ output = RichTextLines(lines)
+
+ if not isinstance(output, RichTextLines) and output is not None:
+ raise ValueError(
+ "Return value from command handler %s is not None or a RichTextLines "
+ "instance" % str(handler))
+
+ return output
+
+ def is_registered(self, prefix):
+ """Test if a command prefix or its alias is has a registered handler.
+
+ Args:
+ prefix: A prefix or its alias, as a str.
+
+ Returns:
+ True iff a handler is registered for prefix.
+ """
+ return self._resolve_prefix(prefix) is not None
+
+ def get_help(self, cmd_prefix=None):
+ """Compile help information into a RichTextLines object.
+
+ Args:
+ cmd_prefix: Optional command prefix, as the prefix itself or one of its
+ aliases.
+
+ Returns:
+ A RichTextLines object containing the help information. If cmd_prefix
+ is None, the return value will be the full command-line help. Otherwise,
+ it will be the help information for the specified command.
+ """
+ if not cmd_prefix:
+ # Print full help information, in sorted order of the command prefixes.
+ help_info = RichTextLines([])
+ if self._help_intro:
+ # If help intro is available, show it at the beginning.
+ help_info.extend(self._help_intro)
+
+ sorted_prefixes = sorted(self._handlers)
+ for cmd_prefix in sorted_prefixes:
+ lines = self._get_help_for_command_prefix(cmd_prefix)
+ lines.append("")
+ lines.append("")
+ help_info.extend(RichTextLines(lines))
+
+ return help_info
+ else:
+ return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))
+
+ def set_help_intro(self, help_intro):
+ """Set an introductory message to help output.
+
+ Args:
+ help_intro: (RichTextLines) Rich text lines appended to the
+ beginning of the output of the command "help", as introductory
+ information.
+ """
+ self._help_intro = help_intro
+
+ def _help_handler(self, args, screen_info=None):
+ """Command handler for "help".
+
+ "help" is a common command that merits built-in support from this class.
+
+ Args:
+ args: Command line arguments to "help" (not including "help" itself).
+ screen_info: (dict) Information regarding the screen, e.g., the screen
+ width in characters: {"cols": 80}
+
+ Returns:
+ (RichTextLines) Screen text output.
+ """
+
+ _ = screen_info # Unused currently.
+
+ if not args:
+ return self.get_help()
+ elif len(args) == 1:
+ return self.get_help(args[0])
+ else:
+ return RichTextLines(["ERROR: help takes only 0 or 1 input argument."])
+
+ def _version_handler(self, args, screen_info=None):
+ del args # Unused currently.
+ del screen_info # Unused currently.
+ return get_tensorflow_version_lines(include_dependency_versions=True)
+
+ def _resolve_prefix(self, token):
+ """Resolve command prefix from the prefix itself or its alias.
+
+ Args:
+ token: a str to be resolved.
+
+ Returns:
+ If resolvable, the resolved command prefix.
+ If not resolvable, None.
+ """
+ if token in self._handlers:
+ return token
+ elif token in self._alias_to_prefix:
+ return self._alias_to_prefix[token]
+ else:
+ return None
+
+ def _get_help_for_command_prefix(self, cmd_prefix):
+ """Compile the help information for a given command prefix.
+
+ Args:
+ cmd_prefix: Command prefix, as the prefix itself or one of its
+ aliases.
+
+ Returns:
+ A list of str as the help information for cmd_prefix. If the cmd_prefix
+ does not exist, the returned list of str will indicate that.
+ """
+ lines = []
+
+ resolved_prefix = self._resolve_prefix(cmd_prefix)
+ if not resolved_prefix:
+ lines.append("Invalid command prefix: \"%s\"" % cmd_prefix)
+ return lines
+
+ lines.append(resolved_prefix)
+
+ if resolved_prefix in self._prefix_to_aliases:
+ lines.append(HELP_INDENT + "Aliases: " + ", ".join(
+ self._prefix_to_aliases[resolved_prefix]))
+
+ lines.append("")
+ help_lines = self._prefix_to_help[resolved_prefix].split("\n")
+ for line in help_lines:
+ lines.append(HELP_INDENT + line)
+
+ return lines
+
+
+class TabCompletionRegistry(object):
+ """Registry for tab completion responses."""
+
+ def __init__(self):
+ self._comp_dict = {}
+
+ # TODO(cais): Rename method names with "comp" to "*completion*" to avoid
+ # confusion.
+
+ def register_tab_comp_context(self, context_words, comp_items):
+ """Register a tab-completion context.
+
+ Register that, for each word in context_words, the potential tab-completions
+ are the words in comp_items.
+
+ A context word is a pre-existing, completed word in the command line that
+ determines how tab-completion works for another, incomplete word in the same
+ command line.
+ Completion items consist of potential candidates for the incomplete word.
+
+ To give a general example, a context word can be "drink", and the completion
+ items can be ["coffee", "tea", "water"]
+
+ Note: A context word can be empty, in which case the context is for the
+ top-level commands.
+
+ Args:
+ context_words: A list of context words belonging to the context being
+ registered. It is a list of str, instead of a single string, to support
+ synonym words triggering the same tab-completion context, e.g.,
+ both "drink" and the short-hand "dr" can trigger the same context.
+ comp_items: A list of completion items, as a list of str.
+
+ Raises:
+ TypeError: if the input arguments are not all of the correct types.
+ """
+
+ if not isinstance(context_words, list):
+ raise TypeError("Incorrect type in context_list: Expected list, got %s" %
+ type(context_words))
+
+ if not isinstance(comp_items, list):
+ raise TypeError("Incorrect type in comp_items: Expected list, got %s" %
+ type(comp_items))
+
+ # Sort the completion items on registration, so that later during
+ # get_completions calls, no sorting will be necessary.
+ sorted_comp_items = sorted(comp_items)
+
+ for context_word in context_words:
+ self._comp_dict[context_word] = sorted_comp_items
+
+ def deregister_context(self, context_words):
+ """Deregister a list of context words.
+
+ Args:
+ context_words: A list of context words to deregister, as a list of str.
+
+ Raises:
+ KeyError: if there are word(s) in context_words that do not correspond
+ to any registered contexts.
+ """
+
+ for context_word in context_words:
+ if context_word not in self._comp_dict:
+ raise KeyError("Cannot deregister unregistered context word \"%s\"" %
+ context_word)
+
+ for context_word in context_words:
+ del self._comp_dict[context_word]
+
+ def extend_comp_items(self, context_word, new_comp_items):
+ """Add a list of completion items to a completion context.
+
+ Args:
+ context_word: A single completion word as a string. The extension will
+ also apply to all other context words of the same context.
+ new_comp_items: (list of str) New completion items to add.
+
+ Raises:
+ KeyError: if the context word has not been registered.
+ """
+
+ if context_word not in self._comp_dict:
+ raise KeyError("Context word \"%s\" has not been registered" %
+ context_word)
+
+ self._comp_dict[context_word].extend(new_comp_items)
+ self._comp_dict[context_word] = sorted(self._comp_dict[context_word])
+
+ def remove_comp_items(self, context_word, comp_items):
+ """Remove a list of completion items from a completion context.
+
+ Args:
+ context_word: A single completion word as a string. The removal will
+ also apply to all other context words of the same context.
+ comp_items: Completion items to remove.
+
+ Raises:
+ KeyError: if the context word has not been registered.
+ """
+
+ if context_word not in self._comp_dict:
+ raise KeyError("Context word \"%s\" has not been registered" %
+ context_word)
+
+ for item in comp_items:
+ self._comp_dict[context_word].remove(item)
+
+ def get_completions(self, context_word, prefix):
+ """Get the tab completions given a context word and a prefix.
+
+ Args:
+ context_word: The context word.
+ prefix: The prefix of the incomplete word.
+
+ Returns:
+ (1) None if no registered context matches the context_word.
+ A list of str for the matching completion items. Can be an empty list
+ if a matching context exists, but no completion item matches the
+ prefix.
+ (2) Common prefix of all the words in the first return value. If the
+ first return value is None, this return value will be None, too. If
+ the first return value is not None, i.e., a list, this return value
+ will be a str, which can be an empty str if there is no common
+ prefix among the items of the list.
+ """
+
+ if context_word not in self._comp_dict:
+ return None, None
+
+ comp_items = self._comp_dict[context_word]
+ comp_items = sorted(
+ [item for item in comp_items if item.startswith(prefix)])
+
+ return comp_items, self._common_prefix(comp_items)
+
+ def _common_prefix(self, m):
+ """Given a list of str, returns the longest common prefix.
+
+ Args:
+ m: (list of str) A list of strings.
+
+ Returns:
+ (str) The longest common prefix.
+ """
+ if not m:
+ return ""
+
+ s1 = min(m)
+ s2 = max(m)
+ for i, c in enumerate(s1):
+ if c != s2[i]:
+ return s1[:i]
+
+ return s1
+
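+# Usage sketch (context and completion words are illustrative):
+#
+#   registry = TabCompletionRegistry()
+#   registry.register_tab_comp_context(["drink", "dr"],
+#                                      ["coffee", "tea", "water"])
+#   registry.get_completions("drink", "t")  # -> (["tea"], "tea")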
+
+class CommandHistory(object):
+ """Keeps command history and supports lookup."""
+
+ _HISTORY_FILE_NAME = ".tfdbg_history"
+
+ def __init__(self, limit=100, history_file_path=None):
+ """CommandHistory constructor.
+
+ Args:
+ limit: Maximum number of the most recent commands that this instance
+ keeps track of, as an int.
+ history_file_path: (str) Manually specified path to history file. Used in
+ testing.
+ """
+
+ self._commands = []
+ self._limit = limit
+ self._history_file_path = (
+ history_file_path or self._get_default_history_file_path())
+ self._load_history_from_file()
+
+ def _load_history_from_file(self):
+ if os.path.isfile(self._history_file_path):
+ try:
+ with open(self._history_file_path, "rt") as history_file:
+ commands = history_file.readlines()
+ self._commands = [command.strip() for command in commands
+ if command.strip()]
+
+ # Limit the size of the history file.
+ if len(self._commands) > self._limit:
+ self._commands = self._commands[-self._limit:]
+ with open(self._history_file_path, "wt") as history_file:
+ for command in self._commands:
+ history_file.write(command + "\n")
+ except IOError:
+ print("WARNING: writing history file failed.")
+
+ def _add_command_to_history_file(self, command):
+ try:
+ with open(self._history_file_path, "at") as history_file:
+ history_file.write(command + "\n")
+ except IOError:
+ pass
+
+ @classmethod
+ def _get_default_history_file_path(cls):
+ return os.path.join(os.path.expanduser("~"), cls._HISTORY_FILE_NAME)
+
+ def add_command(self, command):
+ """Add a command to the command history.
+
+ Args:
+ command: The history command, as a str.
+
+ Raises:
+ TypeError: if command is not a str.
+ """
+
+ if self._commands and command == self._commands[-1]:
+ # Ignore repeating commands in a row.
+ return
+
+ if not isinstance(command, six.string_types):
+ raise TypeError("Attempt to enter non-str entry to command history")
+
+ self._commands.append(command)
+
+ if len(self._commands) > self._limit:
+ self._commands = self._commands[-self._limit:]
+
+ self._add_command_to_history_file(command)
+
+ def most_recent_n(self, n):
+ """Look up the n most recent commands.
+
+ Args:
+ n: Number of most recent commands to look up.
+
+ Returns:
+ A list of n most recent commands, or all available most recent commands,
+ if n exceeds size of the command history, in chronological order.
+ """
+
+ return self._commands[-n:]
+
+ def lookup_prefix(self, prefix, n):
+ """Look up the n most recent commands that starts with prefix.
+
+ Args:
+ prefix: The prefix to lookup.
+ n: Number of most recent commands to look up.
+
+ Returns:
+ A list of n most recent commands that have the specified prefix, or all
+ available most recent commands that have the prefix, if n exceeds the
+ number of history commands with the prefix.
+ """
+
+ commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]
+
+ return commands[-n:]
+
+ # TODO(cais): Lookup by regex.
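+
+# Usage sketch (history is persisted to ~/.tfdbg_history by default):
+#
+#   history = CommandHistory(limit=50)
+#   history.add_command("print tensor_1")
+#   history.lookup_prefix("print", 10)  # up to 10 latest "print ..." commands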
+
+
+class MenuItem(object):
+ """A class for an item in a text-based menu."""
+
+ def __init__(self, caption, content, enabled=True):
+ """Menu constructor.
+
+ TODO(cais): Nested menu is currently not supported. Support it.
+
+ Args:
+ caption: (str) caption of the menu item.
+ content: Content of the menu item. For a menu item that triggers
+ a command, for example, content is the command string.
+ enabled: (bool) whether this menu item is enabled.
+ """
+
+ self._caption = caption
+ self._content = content
+ self._enabled = enabled
+ # The `type` property below reads self._node_type; initialize it so that
+ # accessing the property does not raise AttributeError.
+ self._node_type = None
+
+ @property
+ def caption(self):
+ return self._caption
+
+ @property
+ def type(self):
+ return self._node_type
+
+ @property
+ def content(self):
+ return self._content
+
+ def is_enabled(self):
+ return self._enabled
+
+ def disable(self):
+ self._enabled = False
+
+ def enable(self):
+ self._enabled = True
+
+
+class Menu(object):
+ """A class for text-based menu."""
+
+ def __init__(self, name=None):
+ """Menu constructor.
+
+ Args:
+ name: (str or None) name of this menu.
+ """
+
+ self._name = name
+ self._items = []
+
+ def append(self, item):
+ """Append an item to the Menu.
+
+ Args:
+ item: (MenuItem) the item to be appended.
+ """
+ self._items.append(item)
+
+ def insert(self, index, item):
+ self._items.insert(index, item)
+
+ def num_items(self):
+ return len(self._items)
+
+ def captions(self):
+ return [item.caption for item in self._items]
+
+ def caption_to_item(self, caption):
+ """Get a MenuItem from the caption.
+
+ Args:
+ caption: (str) The caption to look up.
+
+ Returns:
+ (MenuItem) The first-match menu item with the caption, if any.
+
+ Raises:
+ LookupError: If a menu item with the caption does not exist.
+ """
+
+ captions = self.captions()
+ if caption not in captions:
+ raise LookupError("There is no menu item with the caption \"%s\"" %
+ caption)
+
+ return self._items[captions.index(caption)]
+
+ def format_as_single_line(self,
+ prefix=None,
+ divider=" | ",
+ enabled_item_attrs=None,
+ disabled_item_attrs=None):
+ """Format the menu as a single-line RichTextLines object.
+
+ Args:
+ prefix: (str) String added to the beginning of the line.
+ divider: (str) The dividing string between the menu items.
+ enabled_item_attrs: (list or str) Attributes applied to each enabled
+ menu item, e.g., ["bold", "underline"].
+ disabled_item_attrs: (list or str) Attributes applied to each
+ disabled menu item, e.g., ["red"].
+
+ Returns:
+ (RichTextLines) A single-line output representing the menu, with
+ font_attr_segs marking the individual menu items.
+ """
+
+ if (enabled_item_attrs is not None and
+ not isinstance(enabled_item_attrs, list)):
+ enabled_item_attrs = [enabled_item_attrs]
+
+ if (disabled_item_attrs is not None and
+ not isinstance(disabled_item_attrs, list)):
+ disabled_item_attrs = [disabled_item_attrs]
+
+ menu_line = prefix if prefix is not None else ""
+ attr_segs = []
+
+ for item in self._items:
+ menu_line += item.caption
+ item_name_begin = len(menu_line) - len(item.caption)
+
+ if item.is_enabled():
+ final_attrs = [item]
+ if enabled_item_attrs:
+ final_attrs.extend(enabled_item_attrs)
+ attr_segs.append((item_name_begin, len(menu_line), final_attrs))
+ else:
+ if disabled_item_attrs:
+ attr_segs.append(
+ (item_name_begin, len(menu_line), disabled_item_attrs))
+
+ menu_line += divider
+
+ return RichTextLines(menu_line, font_attr_segs={0: attr_segs})
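+
+# Usage sketch (captions and command strings are illustrative):
+#
+#   menu = Menu()
+#   menu.append(MenuItem("home", "home_command"))
+#   menu.append(MenuItem("exit", "exit_command", enabled=False))
+#   menu.format_as_single_line(prefix="Menu: ").lines[0]
+#   # -> "Menu: home | exit | "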
+
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+
+from extensions_paths import CHROME_EXTENSIONS
+from test_file_system import MoveAllTo
+from test_util import ReadFile
+
+FAKE_TABS_IDL = '\n'.join([
+ '// Copyleft stuff.',
+ '',
+ '// Some description here.',
+ 'namespace fakeTabs {',
+ ' dictionary WasImplicitlyInlinedType {};',
+ ' interface Functions {',
+ ' static void myFunc(WasImplicitlyInlinedType arg);',
+ ' static void anotherFunc(WasImplicitlyInlinedType arg);',
+ ' };',
+ '};'])
+
+FAKE_TABS_WITH_INLINING_IDL = '\n'.join([
+ '// Copyleft stuff.',
+ '',
+ '// Some description here.',
+ 'namespace fakeTabs {',
+ ' dictionary WasImplicitlyInlinedType {};',
+ ' interface Functions {',
+ ' static void myFunc(WasImplicitlyInlinedType arg);',
+ ' };',
+ '};'])
+
+TABS_SCHEMA_BRANCHES = MoveAllTo(CHROME_EXTENSIONS, {
+ 'master': {
+ 'docs': {
+ 'templates': {
+ 'json': {
+ 'api_availabilities.json': '{}',
+ 'intro_tables.json': '{}'
+ }
+ }
+ },
+ 'api': {
+ '_api_features.json': json.dumps({
+ 'tabs.scheduledFunc': {
+ 'channel': 'stable'
+ }
+ }),
+ '_manifest_features.json': '{}',
+ '_permission_features.json': '{}',
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'type': 'any',
+ 'properties': {
+ 'url': {
+ 'type': 'any'
+ },
+ 'index': {
+ 'type': 'any'
+ },
+ 'selected': {
+ 'type': 'any'
+ },
+ 'id': {
+ 'type': 'any'
+ },
+ 'windowId': {
+ 'type': 'any'
+ }
+ }
+ },
+ {
+ 'id': 'InlinedType',
+ 'type': 'any',
+ 'inline_doc': True
+ },
+ {
+ 'id': 'InjectDetails',
+ 'type': 'any',
+ 'properties': {
+ 'allFrames': {
+ 'type': 'any'
+ },
+ 'code': {
+ 'type': 'any'
+ },
+ 'file': {
+ 'type':'any'
+ }
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'type': 'any',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {
+ 'type': 'any'
+ },
+ 'fakeTabsProperty2': {
+ 'type': 'any'
+ },
+ 'fakeTabsProperty3': {
+ 'type': 'any'
+ }
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'type': 'function',
+ 'parameters': [
+ {
+ 'name': 'tab',
+ 'type': 'any'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'type': 'function',
+ 'parameters': [
+ {
+ 'name': 'tab',
+ 'type': 'any'
+ }
+ ]
+ },
+ {
+ 'name': 'tabId',
+ 'type': 'any'
+ }
+ ]
+ },
+ {
+ 'name': 'restrictedFunc'
+ },
+ {
+ 'name': 'scheduledFunc',
+ 'parameters': []
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onActivated',
+ 'type': 'event',
+ 'parameters': [
+ {
+ 'name': 'activeInfo',
+ 'type': 'any',
+ 'properties': {
+ 'tabId': {
+ 'type': 'any'
+ },
+ 'windowId': {
+ 'type': 'any'
+ }
+ }
+ }
+ ]
+ },
+ {
+ 'name': 'onUpdated',
+ 'type': 'event',
+ 'parameters': [
+ {
+ 'name': 'tabId',
+ 'type': 'any'
+ },
+ {
+ 'name': 'tab',
+ 'type': 'any'
+ },
+ {
+ 'name': 'changeInfo',
+ 'type': 'any',
+ 'properties': {
+ 'pinned': {
+ 'type': 'any'
+ },
+ 'status': {
+ 'type': 'any'
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1612': {
+ 'api': {
+ '_api_features.json': json.dumps({
+ 'tabs.scheduledFunc': {
+ 'channel': 'stable'
+ }
+ }),
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {},
+ 'code': {},
+ 'file': {}
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ },
+ {
+ 'name': 'tabId'
+ }
+ ]
+ },
+ {
+ 'name': 'restrictedFunc'
+ },
+ {
+ 'name': 'scheduledFunc',
+ 'parameters': []
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onActivated',
+ 'parameters': [
+ {
+ 'name': 'activeInfo',
+ 'properties': {
+ 'tabId': {},
+ 'windowId': {}
+ }
+ }
+ ]
+ },
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'tab'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1599': {
+ 'api': {
+ '_api_features.json': "{}",
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {},
+ 'code': {},
+ 'file': {}
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ },
+ {
+ 'name': 'tabId'
+ }
+ ]
+ },
+ {
+ 'name': 'restrictedFunc'
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onActivated',
+ 'parameters': [
+ {
+ 'name': 'activeInfo',
+ 'properties': {
+ 'tabId': {},
+ }
+ }
+ ]
+ },
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1547': {
+ 'api': {
+ '_api_features.json': json.dumps({
+ 'tabs.restrictedFunc': {
+ 'channel': 'dev'
+ }
+ }),
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {},
+ 'code': {},
+ 'file': {}
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ },
+ ]
+ },
+ {
+ 'name': 'restrictedFunc'
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1500': {
+ 'api': {
+ '_api_features.json': "{}",
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {},
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ },
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1453': {
+ 'api': {
+ '_api_features.json': "{}",
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {},
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ },
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1410': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {},
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ 'deprecated': 'This is deprecated'
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1364': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'fake_tabs.idl': FAKE_TABS_WITH_INLINING_IDL,
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ },
+ {
+ 'id': 'InjectDetails',
+ 'properties': {
+ 'allFrames': {}
+ }
+ },
+ {
+ 'id': 'DeprecatedType',
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1312': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1271': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1229': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {},
+ 'windowId': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1180': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'selected': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1132': {
+ 'api': {
+ '_manifest_features.json': "{}",
+ '_permission_features.json': "{}",
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1084': {
+ 'api': {
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'getCurrent',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '1025': {
+ 'api': {
+ 'tabs.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'index': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '963': {
+ 'api': {
+ 'extension_api.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ },
+ {
+ 'name': 'changeInfo',
+ 'properties': {
+ 'pinned': {},
+ 'status': {}
+ }
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '912': {
+ 'api': {
+ 'extension_api.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '874': {
+ 'api': {
+ 'extension_api.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {},
+ 'fakeTabsProperty2': {}
+ },
+ 'functions': [
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '835': {
+ 'api': {
+ 'extension_api.json': json.dumps([{
+ 'namespace': 'tabs',
+ 'types': [
+ {
+ 'id': 'Tab',
+ 'properties': {
+ 'url': {},
+ 'id': {}
+ }
+ }
+ ],
+ 'properties': {
+ 'fakeTabsProperty1': {}
+ },
+ 'functions': [
+ {
+ 'name': 'get',
+ 'parameters': [
+ {
+ 'name': 'callback',
+ 'parameters': [
+ {
+ 'name': 'tab'
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ 'events': [
+ {
+ 'name': 'onUpdated',
+ 'parameters': [
+ {
+ 'name': 'tabId'
+ }
+ ]
+ }
+ ]
+ }])
+ }
+ },
+ '782': {
+ 'api': {
+ 'extension_api.json': "{}"
+ }
+ }
+})
+
+import tensorflow as tf
+import numpy as np
+from PIL import Image
+import os
+import urllib.request
+
+def maybe_download(directory, filename, url):
+    print('Trying to download', url)
+ if not tf.gfile.Exists(directory):
+ tf.gfile.MakeDirs(directory)
+ filepath = os.path.join(directory, filename)
+ if not tf.gfile.Exists(filepath):
+ filepath, _ = urllib.request.urlretrieve(url, filepath)
+ with tf.gfile.GFile(filepath) as f:
+ size = f.size()
+ print('Successfully downloaded', filename, size, 'bytes.')
+ return filepath
+
+def load_pretrained(filepath):
+ return np.load(filepath, encoding='bytes').item()
+
+def get_epoch():
+ epoch_step = tf.Variable(0, name='epoch_step', trainable=False)
+ epoch_update = epoch_step.assign(epoch_step + 1)
+ return epoch_step, epoch_update
+
+def load_imgs(train_img_dir, filelist):
+ def load_img(path):
+ _img = Image.open(path)
+ img = np.array(_img)
+ _img.close()
+ return img
+
+ _imgs = [os.path.join(train_img_dir, filename + ".png") for filename in filelist]
+
+ imgs = [load_img(_img) for _img in _imgs]
+ return imgs
+
+def load_annots(train_annot_dir, filelist):
+ def load_annot(path):
+ #print(path)
+ annot = np.load(path, encoding='bytes')
+ #print("original dims: {}x{}".format(annot[0,0], annot[0,1]))
+ return annot
+
+ _annots = [os.path.join(train_annot_dir, filename + ".npy") for filename in filelist]
+
+ annots = [load_annot(_annot) for _annot in _annots]
+
+ return annots
+
+def tf_Print(on, x, summarize=50, message=""):
+ if on:
+ x = tf.Print(x, [x, tf.shape(x)], summarize=summarize, message=message)
+
+ return x
+
+def debug_print(on, *x):
+ if on:
+ print(x)
+ return x
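+
+# Hedged usage sketch (added for illustration; not part of the original
+# helpers). Assumes TensorFlow 1.x graph mode, consistent with the tf.gfile
+# and tf.Print APIs used above.
+def _example_epoch_counter():
+    epoch_step, epoch_update = get_epoch()
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        for _ in range(3):
+            sess.run(epoch_update)  # bump the counter once per "epoch"
+        return sess.run(epoch_step)  # -> 3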
+
+"""A module which implements the time frequency estimation.
+
+Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
+"""
+# Authors : Alexandre Gramfort
+# Hari Bharadwaj
+#
+# License : BSD (3-clause)
+
+import warnings
+from math import sqrt
+from copy import deepcopy
+import numpy as np
+from scipy import linalg
+from scipy.fftpack import fftn, ifftn
+
+from ..fixes import partial
+from ..baseline import rescale
+from ..parallel import parallel_func
+from ..utils import logger, verbose, _time_mask
+from ..channels.channels import ContainsMixin, UpdateChannelsMixin
+from ..io.pick import pick_info, pick_types
+from ..utils import check_fname
+from .multitaper import dpss_windows
+from .._hdf5 import write_hdf5, read_hdf5
+
+
+def _get_data(inst, return_itc):
+ """Get data from Epochs or Evoked instance as epochs x ch x time"""
+ from ..epochs import _BaseEpochs
+ from ..evoked import Evoked
+ if not isinstance(inst, (_BaseEpochs, Evoked)):
+ raise TypeError('inst must be Epochs or Evoked')
+ if isinstance(inst, _BaseEpochs):
+ data = inst.get_data()
+ else:
+ if return_itc:
+ raise ValueError('return_itc must be False for evoked data')
+ data = inst.data[np.newaxis, ...].copy()
+ return data
+
+
+def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
+ """Compute Wavelets for the given frequency range
+
+ Parameters
+ ----------
+ sfreq : float
+ Sampling Frequency
+ freqs : array
+ frequency range of interest (1 x Frequencies)
+    n_cycles : float | array of float
+ Number of cycles. Fixed number or one per frequency.
+ sigma : float, (optional)
+        It controls the width of the wavelet, i.e., its temporal
+        resolution. If sigma is None the temporal resolution
+        is adapted with the frequency, as for all wavelet transforms.
+        The higher the frequency, the shorter the wavelet.
+        If sigma is fixed, the temporal resolution is fixed,
+        as for the short-time Fourier transform, and the number
+        of oscillations increases with the frequency.
+ zero_mean : bool
+ Make sure the wavelet is zero mean
+
+ Returns
+ -------
+ Ws : list of array
+ Wavelets time series
+
+ See Also
+ --------
+ mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
+ with Morlet wavelets
+ """
+ Ws = list()
+ n_cycles = np.atleast_1d(n_cycles)
+
+ if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
+ raise ValueError("n_cycles should be fixed or defined for "
+ "each frequency.")
+ for k, f in enumerate(freqs):
+ if len(n_cycles) != 1:
+ this_n_cycles = n_cycles[k]
+ else:
+ this_n_cycles = n_cycles[0]
+ # fixed or scale-dependent window
+ if sigma is None:
+ sigma_t = this_n_cycles / (2.0 * np.pi * f)
+ else:
+ sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
+ # this scaling factor is proportional to (Tallon-Baudry 98):
+ # (sigma_t*sqrt(pi))^(-1/2);
+ t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
+ t = np.r_[-t[::-1], t[1:]]
+ oscillation = np.exp(2.0 * 1j * np.pi * f * t)
+        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
+ if zero_mean: # to make it zero mean
+ real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
+ oscillation -= real_offset
+        W = oscillation * gaussian_envelope
+ W /= sqrt(0.5) * linalg.norm(W.ravel())
+ Ws.append(W)
+ return Ws
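+
+# Illustrative sketch (added for clarity, not in the original module): with
+# sigma=None the window is scale-dependent, so higher frequencies get
+# shorter wavelets.
+def _example_morlet():
+    freqs = np.array([10., 20., 40.])
+    Ws = morlet(sfreq=1000., freqs=freqs, n_cycles=7)
+    return [len(W) for W in Ws]  # lengths decrease as frequency increases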
+
+
+def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
+ zero_mean=False):
+ """Compute Wavelets for the given frequency range
+
+ Parameters
+ ----------
+ sfreq : float
+ Sampling Frequency.
+ freqs : ndarray, shape (n_freqs,)
+ The frequencies in Hz.
+ n_cycles : float | ndarray, shape (n_freqs,)
+ The number of cycles globally or for each frequency.
+ Defaults to 7.
+ time_bandwidth : float, (optional)
+ Time x Bandwidth product.
+ The number of good tapers (low-bias) is chosen automatically based on
+ this to equal floor(time_bandwidth - 1).
+ Default is 4.0, giving 3 good tapers.
+
+ Returns
+ -------
+ Ws : list of array
+ Wavelets time series
+ """
+ Ws = list()
+ if time_bandwidth < 2.0:
+ raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
+ n_taps = int(np.floor(time_bandwidth - 1))
+ n_cycles = np.atleast_1d(n_cycles)
+
+ if n_cycles.size != 1 and n_cycles.size != len(freqs):
+ raise ValueError("n_cycles should be fixed or defined for "
+ "each frequency.")
+
+ for m in range(n_taps):
+ Wm = list()
+ for k, f in enumerate(freqs):
+ if len(n_cycles) != 1:
+ this_n_cycles = n_cycles[k]
+ else:
+ this_n_cycles = n_cycles[0]
+
+ t_win = this_n_cycles / float(f)
+ t = np.arange(0., t_win, 1.0 / sfreq)
+ # Making sure wavelets are centered before tapering
+ oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
+
+ # Get dpss tapers
+ tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
+ n_taps)
+
+ Wk = oscillation * tapers[m]
+ if zero_mean: # to make it zero mean
+ real_offset = Wk.mean()
+ Wk -= real_offset
+ Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
+
+ Wm.append(Wk)
+
+ Ws.append(Wm)
+
+ return Ws
+
+
+def _centered(arr, newsize):
+ """Aux Function to center data"""
+ # Return the center newsize portion of the array.
+ newsize = np.asarray(newsize)
+ currsize = np.array(arr.shape)
+ startind = (currsize - newsize) // 2
+ endind = startind + newsize
+ myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
+ return arr[tuple(myslice)]
+
+
+def _cwt_fft(X, Ws, mode="same"):
+ """Compute cwt with fft based convolutions
+ Return a generator over signals.
+ """
+ X = np.asarray(X)
+
+ # Precompute wavelets for given frequency range to save time
+ n_signals, n_times = X.shape
+ n_freqs = len(Ws)
+
+ Ws_max_size = max(W.size for W in Ws)
+ size = n_times + Ws_max_size - 1
+ # Always use 2**n-sized FFT
+ fsize = 2 ** int(np.ceil(np.log2(size)))
+
+ # precompute FFTs of Ws
+ fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
+ for i, W in enumerate(Ws):
+ if len(W) > n_times:
+ raise ValueError('Wavelet is too long for such a short signal. '
+ 'Reduce the number of cycles.')
+ fft_Ws[i] = fftn(W, [fsize])
+
+ for k, x in enumerate(X):
+ if mode == "full":
+ tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
+ elif mode == "same" or mode == "valid":
+ tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
+
+ fft_x = fftn(x, [fsize])
+ for i, W in enumerate(Ws):
+ ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
+ if mode == "valid":
+ sz = abs(W.size - n_times) + 1
+                offset = (n_times - sz) // 2  # integer division for slicing
+ tfr[i, offset:(offset + sz)] = _centered(ret, sz)
+ else:
+ tfr[i, :] = _centered(ret, n_times)
+ yield tfr
+
+
+def _cwt_convolve(X, Ws, mode='same'):
+ """Compute time freq decomposition with temporal convolutions
+ Return a generator over signals.
+ """
+ X = np.asarray(X)
+
+ n_signals, n_times = X.shape
+ n_freqs = len(Ws)
+
+ # Compute convolutions
+ for x in X:
+ tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
+ for i, W in enumerate(Ws):
+ ret = np.convolve(x, W, mode=mode)
+ if len(W) > len(x):
+ raise ValueError('Wavelet is too long for such a short '
+ 'signal. Reduce the number of cycles.')
+ if mode == "valid":
+ sz = abs(W.size - n_times) + 1
+                offset = (n_times - sz) // 2  # integer division for slicing
+ tfr[i, offset:(offset + sz)] = ret
+ else:
+ tfr[i] = ret
+ yield tfr
+
+
+def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
+ """Compute time freq decomposition with Morlet wavelets
+
+ This function operates directly on numpy arrays. Consider using
+ `tfr_morlet` to process `Epochs` or `Evoked` instances.
+
+ Parameters
+ ----------
+ X : array of shape [n_signals, n_times]
+ signals (one per line)
+ sfreq : float
+ sampling Frequency
+ freqs : array
+ Array of frequencies of interest
+ use_fft : bool
+        Compute the convolution with FFT-based or temporal convolution.
+    n_cycles : float | array of float
+ Number of cycles. Fixed number or one per frequency.
+ zero_mean : bool
+ Make sure the wavelets are zero mean.
+
+ Returns
+ -------
+ tfr : 3D array
+ Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+
+ See Also
+ --------
+ tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
+ """
+ mode = 'same'
+ # mode = "valid"
+ n_signals, n_times = X.shape
+ n_frequencies = len(freqs)
+
+ # Precompute wavelets for given frequency range to save time
+ Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
+
+ if use_fft:
+ coefs = _cwt_fft(X, Ws, mode)
+ else:
+ coefs = _cwt_convolve(X, Ws, mode)
+
+ tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
+ for k, tfr in enumerate(coefs):
+ tfrs[k] = tfr
+
+ return tfrs
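+
+# Minimal demo (added for illustration): decompose two random signals; the
+# output shape follows the docstring, (n_signals, n_frequencies, n_times).
+def _example_cwt_morlet():
+    rng = np.random.RandomState(0)
+    X = rng.randn(2, 1000)
+    tfr = cwt_morlet(X, sfreq=500., freqs=np.array([10., 20.]), n_cycles=3)
+    assert tfr.shape == (2, 2, 1000)
+    return tfr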
+
+
+def cwt(X, Ws, use_fft=True, mode='same', decim=1):
+ """Compute time freq decomposition with continuous wavelet transform
+
+ Parameters
+ ----------
+ X : array of shape [n_signals, n_times]
+ signals (one per line)
+ Ws : list of array
+ Wavelets time series
+ use_fft : bool
+ Use FFT for convolutions
+ mode : 'same' | 'valid' | 'full'
+ Convention for convolution
+ decim : int
+ Temporal decimation factor
+
+ Returns
+ -------
+ tfr : 3D array
+ Time Frequency Decompositions (n_signals x n_frequencies x n_times)
+
+ See Also
+ --------
+ mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
+ with Morlet wavelets
+ """
+ n_signals, n_times = X[:, ::decim].shape
+ n_frequencies = len(Ws)
+
+ if use_fft:
+ coefs = _cwt_fft(X, Ws, mode)
+ else:
+ coefs = _cwt_convolve(X, Ws, mode)
+
+ tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
+ for k, tfr in enumerate(coefs):
+ tfrs[k] = tfr[..., ::decim]
+
+ return tfrs
+
+
+def _time_frequency(X, Ws, use_fft, decim):
+ """Aux of time_frequency for parallel computing over channels
+ """
+ n_epochs, n_times = X.shape
+ n_times = n_times // decim + bool(n_times % decim)
+ n_frequencies = len(Ws)
+ psd = np.zeros((n_frequencies, n_times)) # PSD
+ plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
+
+ mode = 'same'
+ if use_fft:
+ tfrs = _cwt_fft(X, Ws, mode)
+ else:
+ tfrs = _cwt_convolve(X, Ws, mode)
+
+ for tfr in tfrs:
+ tfr = tfr[:, ::decim]
+ tfr_abs = np.abs(tfr)
+ psd += tfr_abs ** 2
+ plf += tfr / tfr_abs
+ psd /= n_epochs
+ plf = np.abs(plf) / n_epochs
+ return psd, plf
+
+
+@verbose
+def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
+ baseline=None, baseline_mode='ratio', times=None,
+ decim=1, n_jobs=1, zero_mean=False, verbose=None):
+ """Compute time-frequency power on single epochs
+
+ Parameters
+ ----------
+ data : array of shape [n_epochs, n_channels, n_times]
+ The epochs
+ sfreq : float
+ Sampling rate
+ frequencies : array-like
+ The frequencies
+ use_fft : bool
+ Use the FFT for convolutions or not.
+ n_cycles : float | array of float
+ Number of cycles in the Morlet wavelet. Fixed number
+ or one per frequency.
+ baseline : None (default) or tuple of length 2
+ The time interval to apply baseline correction.
+ If None do not apply it. If baseline is (a, b)
+ the interval is between "a (s)" and "b (s)".
+ If a is None the beginning of the data is used
+ and if b is None then b is set to the end of the interval.
+        If baseline is equal to (None, None) the whole time
+        interval is used.
+ baseline_mode : None | 'ratio' | 'zscore'
+ Do baseline correction with ratio (power is divided by mean
+ power during baseline) or zscore (power is divided by standard
+ deviation of power during baseline after subtracting the mean,
+ power = [power - mean(power_baseline)] / std(power_baseline))
+ times : array
+ Required to define baseline
+ decim : int
+ Temporal decimation factor
+ n_jobs : int
+ The number of epochs to process at the same time
+ zero_mean : bool
+ Make sure the wavelets are zero mean.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ power : 4D array
+ Power estimate (Epochs x Channels x Frequencies x Timepoints).
+ """
+ mode = 'same'
+ n_frequencies = len(frequencies)
+ n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+
+ # Precompute wavelets for given frequency range to save time
+ Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+ parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
+
+ logger.info("Computing time-frequency power on single epochs...")
+
+ power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
+ dtype=np.float)
+
+ # Package arguments for `cwt` here to minimize omissions where only one of
+ # the two calls below is updated with new function arguments.
+ cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
+ if n_jobs == 1:
+ for k, e in enumerate(data):
+ x = cwt(e, **cwt_kw)
+ power[k] = (x * x.conj()).real
+ else:
+ # Precompute tf decompositions in parallel
+ tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
+ for k, tfr in enumerate(tfrs):
+ power[k] = (tfr * tfr.conj()).real
+
+ # Run baseline correction. Be sure to decimate the times array as well if
+ # needed.
+ if times is not None:
+ times = times[::decim]
+ power = rescale(power, times, baseline, baseline_mode, copy=False)
+ return power
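+
+# Hedged sketch of per-epoch power on synthetic data, including decimation.
+# The shapes follow the docstring above; the data here are random.
+def _example_single_trial_power():
+    rng = np.random.RandomState(42)
+    data = rng.randn(5, 3, 600)  # (n_epochs, n_channels, n_times)
+    power = single_trial_power(data, sfreq=300.,
+                               frequencies=np.array([8., 12.]),
+                               n_cycles=2, decim=2)
+    return power.shape  # (5, 3, 2, 300)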
+
+
+def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
+ decim=1, n_jobs=1, zero_mean=False):
+ """Compute time induced power and inter-trial phase-locking factor
+
+ The time frequency decomposition is done with Morlet wavelets
+
+ Parameters
+ ----------
+ data : array
+ 3D array of shape [n_epochs, n_channels, n_times]
+ sfreq : float
+ sampling Frequency
+ frequencies : array
+ Array of frequencies of interest
+ use_fft : bool
+ Compute transform with fft based convolutions or temporal
+ convolutions.
+ n_cycles : float | array of float
+ Number of cycles. Fixed number or one per frequency.
+    decim : int
+ Temporal decimation factor
+ n_jobs : int
+        The number of CPUs used in parallel. If -1, all CPUs are used.
+        Requires the joblib package.
+ zero_mean : bool
+ Make sure the wavelets are zero mean.
+
+ Returns
+ -------
+ power : 2D array
+ Induced power (Channels x Frequencies x Timepoints).
+ Squared amplitude of time-frequency coefficients.
+ phase_lock : 2D array
+ Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
+ """
+ n_frequencies = len(frequencies)
+ n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+
+ # Precompute wavelets for given frequency range to save time
+ Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
+
+ psd = np.empty((n_channels, n_frequencies, n_times))
+ plf = np.empty((n_channels, n_frequencies, n_times))
+ # Separate to save memory for n_jobs=1
+ parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
+ psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
+ for c in range(n_channels))
+ for c, (psd_c, plf_c) in enumerate(psd_plf):
+ psd[c, :, :], plf[c, :, :] = psd_c, plf_c
+ return psd, plf
+
+
+def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
+ baseline, vmin, vmax, dB):
+ """Aux Function to prepare tfr computation"""
+ from ..viz.utils import _setup_vmin_vmax
+
+ if mode is not None and baseline is not None:
+ logger.info("Applying baseline correction '%s' during %s" %
+ (mode, baseline))
+ data = rescale(data.copy(), times, baseline, mode)
+
+ # crop time
+ itmin, itmax = None, None
+ idx = np.where(_time_mask(times, tmin, tmax))[0]
+ if tmin is not None:
+ itmin = idx[0]
+ if tmax is not None:
+ itmax = idx[-1] + 1
+
+ times = times[itmin:itmax]
+
+ # crop freqs
+ ifmin, ifmax = None, None
+ idx = np.where(_time_mask(freqs, fmin, fmax))[0]
+ if fmin is not None:
+ ifmin = idx[0]
+ if fmax is not None:
+ ifmax = idx[-1] + 1
+
+ freqs = freqs[ifmin:ifmax]
+
+ # crop data
+ data = data[:, ifmin:ifmax, itmin:itmax]
+
+ times *= 1e3
+ if dB:
+ data = 10 * np.log10((data * data.conj()).real)
+
+ vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
+ return data, times, freqs, vmin, vmax
+
+
+class AverageTFR(ContainsMixin, UpdateChannelsMixin):
+ """Container for Time-Frequency data
+
+ Can for example store induced power at sensor level or intertrial
+ coherence.
+
+ Parameters
+ ----------
+ info : Info
+ The measurement info.
+ data : ndarray, shape (n_channels, n_freqs, n_times)
+ The data.
+ times : ndarray, shape (n_times,)
+ The time values in seconds.
+ freqs : ndarray, shape (n_freqs,)
+ The frequencies in Hz.
+ nave : int
+ The number of averaged TFRs.
+ comment : str | None
+ Comment on the data, e.g., the experimental condition.
+ Defaults to None.
+ method : str | None
+ Comment on the method used to compute the data, e.g., morlet wavelet.
+ Defaults to None.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Attributes
+ ----------
+ ch_names : list
+ The names of the channels.
+ """
+ @verbose
+ def __init__(self, info, data, times, freqs, nave, comment=None,
+ method=None, verbose=None):
+ self.info = info
+ if data.ndim != 3:
+ raise ValueError('data should be 3d. Got %d.' % data.ndim)
+ n_channels, n_freqs, n_times = data.shape
+ if n_channels != len(info['chs']):
+ raise ValueError("Number of channels and data size don't match"
+ " (%d != %d)." % (n_channels, len(info['chs'])))
+ if n_freqs != len(freqs):
+ raise ValueError("Number of frequencies and data size don't match"
+ " (%d != %d)." % (n_freqs, len(freqs)))
+ if n_times != len(times):
+ raise ValueError("Number of times and data size don't match"
+ " (%d != %d)." % (n_times, len(times)))
+ self.data = data
+ self.times = times
+ self.freqs = freqs
+ self.nave = nave
+ self.comment = comment
+ self.method = method
+
+ @property
+ def ch_names(self):
+ return self.info['ch_names']
+
+ def crop(self, tmin=None, tmax=None, copy=False):
+ """Crop data to a given time interval
+
+ Parameters
+ ----------
+ tmin : float | None
+ Start time of selection in seconds.
+ tmax : float | None
+ End time of selection in seconds.
+ copy : bool
+            If False, the instance is cropped in place.
+ """
+ inst = self if not copy else self.copy()
+ mask = _time_mask(inst.times, tmin, tmax)
+ inst.times = inst.times[mask]
+ inst.data = inst.data[..., mask]
+ return inst
+
+ @verbose
+ def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
+ tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
+ cmap='RdBu_r', dB=False, colorbar=True, show=True,
+ title=None, axes=None, verbose=None):
+ """Plot TFRs in a topography with images
+
+ Parameters
+ ----------
+ picks : array-like of int | None
+ The indices of the channels to plot.
+ baseline : None (default) or tuple of length 2
+ The time interval to apply baseline correction.
+ If None do not apply it. If baseline is (a, b)
+ the interval is between "a (s)" and "b (s)".
+ If a is None the beginning of the data is used
+ and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) the whole time
+            interval is used.
+ mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+ Do baseline correction with ratio (power is divided by mean
+ power during baseline) or zscore (power is divided by standard
+ deviation of power during baseline after subtracting the mean,
+ power = [power - mean(power_baseline)] / std(power_baseline)).
+ If None no baseline correction is applied.
+ tmin : None | float
+ The first time instant to display. If None the first time point
+ available is used.
+ tmax : None | float
+ The last time instant to display. If None the last time point
+ available is used.
+ fmin : None | float
+ The first frequency to display. If None the first frequency
+ available is used.
+ fmax : None | float
+ The last frequency to display. If None the last frequency
+ available is used.
+ vmin : float | None
+            The minimum value on the color scale. If vmin is None, the data
+            minimum value is used.
+        vmax : float | None
+            The maximum value on the color scale. If vmax is None, the data
+            maximum value is used.
+ cmap : matplotlib colormap | str
+ The colormap to use. Defaults to 'RdBu_r'.
+ dB : bool
+ If True, 20*log10 is applied to the data to get dB.
+ colorbar : bool
+ If true, colorbar will be added to the plot. For user defined axes,
+ the colorbar cannot be drawn. Defaults to True.
+ show : bool
+ Call pyplot.show() at the end.
+ title : str | None
+ String for title. Defaults to None (blank/no title).
+ axes : instance of Axes | list | None
+ The axes to plot to. If list, the list must be a list of Axes of
+ the same length as the number of channels. If instance of Axes,
+ there must be only one channel plotted.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ fig : matplotlib.figure.Figure
+ The figure containing the topography.
+ """
+ from ..viz.topo import _imshow_tfr
+ import matplotlib.pyplot as plt
+ times, freqs = self.times.copy(), self.freqs.copy()
+ data = self.data[picks]
+
+ data, times, freqs, vmin, vmax = \
+ _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
+ baseline, vmin, vmax, dB)
+
+ tmin, tmax = times[0], times[-1]
+ if isinstance(axes, plt.Axes):
+ axes = [axes]
+ if isinstance(axes, list) and len(axes) != len(picks):
+ raise RuntimeError('There must be an axes for each picked '
+ 'channel.')
+ if colorbar:
+ logger.warning('Cannot draw colorbar for user defined axes.')
+ for idx in range(len(data)):
+ if axes is None:
+ fig = plt.figure()
+ ax = fig.add_subplot(111)
+ else:
+ ax = axes[idx]
+ fig = ax.get_figure()
+ _imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, ylim=None,
+ tfr=data[idx: idx + 1], freq=freqs,
+ x_label='Time (ms)', y_label='Frequency (Hz)',
+ colorbar=False, picker=False, cmap=cmap)
+ if title:
+ fig.suptitle(title)
+ if show:
+ plt.show()
+ return fig
+
+ def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
+ tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
+ layout=None, cmap='RdBu_r', title=None, dB=False,
+ colorbar=True, layout_scale=0.945, show=True,
+ border='none', fig_facecolor='k', font_color='w'):
+ """Plot TFRs in a topography with images
+
+ Parameters
+ ----------
+ picks : array-like of int | None
+ The indices of the channels to plot. If None all available
+ channels are displayed.
+ baseline : None (default) or tuple of length 2
+ The time interval to apply baseline correction.
+ If None do not apply it. If baseline is (a, b)
+ the interval is between "a (s)" and "b (s)".
+ If a is None the beginning of the data is used
+ and if b is None then b is set to the end of the interval.
+            If baseline is equal to (None, None) the whole time
+            interval is used.
+ mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+ Do baseline correction with ratio (power is divided by mean
+ power during baseline) or zscore (power is divided by standard
+ deviation of power during baseline after subtracting the mean,
+ power = [power - mean(power_baseline)] / std(power_baseline)).
+ If None no baseline correction is applied.
+ tmin : None | float
+ The first time instant to display. If None the first time point
+ available is used.
+ tmax : None | float
+ The last time instant to display. If None the last time point
+ available is used.
+ fmin : None | float
+ The first frequency to display. If None the first frequency
+ available is used.
+ fmax : None | float
+ The last frequency to display. If None the last frequency
+ available is used.
+ vmin : float | None
+            The minimum value on the color scale. If vmin is None, the data
+            minimum value is used.
+        vmax : float | None
+            The maximum value on the color scale. If vmax is None, the data
+            maximum value is used.
+ layout : Layout | None
+ Layout instance specifying sensor positions. If possible, the
+ correct layout is inferred from the data.
+ cmap : matplotlib colormap | str
+ The colormap to use. Defaults to 'RdBu_r'.
+ title : str
+ Title of the figure.
+ dB : bool
+ If True, 20*log10 is applied to the data to get dB.
+ colorbar : bool
+ If true, colorbar will be added to the plot
+ layout_scale : float
+ Scaling factor for adjusting the relative size of the layout
+ on the canvas.
+ show : bool
+ Call pyplot.show() at the end.
+ border : str
+ matplotlib borders style to be used for each sensor plot.
+ fig_facecolor : str | obj
+ The figure face color. Defaults to black.
+        font_color : str | obj
+ The color of tick labels in the colorbar. Defaults to white.
+
+ Returns
+ -------
+ fig : matplotlib.figure.Figure
+ The figure containing the topography.
+ """
+ from ..viz.topo import _imshow_tfr, _plot_topo
+ import matplotlib.pyplot as plt
+ times = self.times.copy()
+ freqs = self.freqs
+ data = self.data
+ info = self.info
+
+ if picks is not None:
+ data = data[picks]
+ info = pick_info(info, picks)
+
+ data, times, freqs, vmin, vmax = \
+ _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
+ mode, baseline, vmin, vmax, dB)
+
+ if layout is None:
+ from mne import find_layout
+ layout = find_layout(self.info)
+
+ imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
+
+ fig = _plot_topo(info=info, times=times,
+ show_func=imshow, layout=layout,
+ colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
+ layout_scale=layout_scale, title=title, border=border,
+ x_label='Time (ms)', y_label='Frequency (Hz)',
+ fig_facecolor=fig_facecolor,
+ font_color=font_color)
+
+ if show:
+ plt.show()
+
+ return fig
+
+ def _check_compat(self, tfr):
+ """checks that self and tfr have the same time-frequency ranges"""
+ assert np.all(tfr.times == self.times)
+ assert np.all(tfr.freqs == self.freqs)
+
+ def __add__(self, tfr):
+ self._check_compat(tfr)
+ out = self.copy()
+ out.data += tfr.data
+ return out
+
+ def __iadd__(self, tfr):
+ self._check_compat(tfr)
+ self.data += tfr.data
+ return self
+
+ def __sub__(self, tfr):
+ self._check_compat(tfr)
+ out = self.copy()
+ out.data -= tfr.data
+ return out
+
+ def __isub__(self, tfr):
+ self._check_compat(tfr)
+ self.data -= tfr.data
+ return self
+
+ def copy(self):
+ """Return a copy of the instance."""
+ return deepcopy(self)
+
+ def __repr__(self):
+ s = "time : [%f, %f]" % (self.times[0], self.times[-1])
+ s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
+ s += ", nave : %d" % self.nave
+ s += ', channels : %d' % self.data.shape[0]
+ return "" % s
+
+ def apply_baseline(self, baseline, mode='mean'):
+ """Baseline correct the data
+
+ Parameters
+ ----------
+ baseline : tuple or list of length 2
+ The time interval to apply rescaling / baseline correction.
+ If None do not apply it. If baseline is (a, b)
+ the interval is between "a (s)" and "b (s)".
+ If a is None the beginning of the data is used
+ and if b is None then b is set to the end of the interval.
+ If baseline is equal to (None, None) all the time
+ interval is used.
+ mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+ Do baseline correction with ratio (power is divided by mean
+ power during baseline) or z-score (power is divided by standard
+ deviation of power during baseline after subtracting the mean,
+ power = [power - mean(power_baseline)] / std(power_baseline))
+            If None, no baseline correction will be performed.
+ """
+ self.data = rescale(self.data, self.times, baseline, mode, copy=False)
+
+ def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
+ ch_type=None, baseline=None, mode='mean',
+ layout=None, vmin=None, vmax=None, cmap='RdBu_r',
+ sensors=True, colorbar=True, unit=None, res=64, size=2,
+ cbar_fmt='%1.1e', show_names=False, title=None,
+ axes=None, show=True, outlines='head', head_pos=None):
+ """Plot topographic maps of time-frequency intervals of TFR data
+
+ Parameters
+ ----------
+ tmin : None | float
+ The first time instant to display. If None the first time point
+ available is used.
+ tmax : None | float
+ The last time instant to display. If None the last time point
+ available is used.
+ fmin : None | float
+ The first frequency to display. If None the first frequency
+ available is used.
+ fmax : None | float
+ The last frequency to display. If None the last frequency
+ available is used.
+ ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
+ The channel type to plot. For 'grad', the gradiometers are
+ collected in pairs and the RMS for each pair is plotted.
+ If None, then channels are chosen in the order given above.
+ baseline : tuple or list of length 2
+ The time interval to apply rescaling / baseline correction.
+ If None do not apply it. If baseline is (a, b)
+ the interval is between "a (s)" and "b (s)".
+ If a is None the beginning of the data is used
+ and if b is None then b is set to the end of the interval.
+ If baseline is equal to (None, None) all the time
+ interval is used.
+ mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
+ Do baseline correction with ratio (power is divided by mean
+ power during baseline) or z-score (power is divided by standard
+ deviation of power during baseline after subtracting the mean,
+ power = [power - mean(power_baseline)] / std(power_baseline))
+            If None, no baseline correction will be performed.
+ layout : None | Layout
+ Layout instance specifying sensor positions (does not need to
+ be specified for Neuromag data). If possible, the correct layout
+ file is inferred from the data; if no appropriate layout file was
+ found, the layout is automatically generated from the sensor
+ locations.
+ vmin : float | callable
+            The value specifying the lower bound of the color range.
+ If None, and vmax is None, -vmax is used. Else np.min(data).
+ If callable, the output equals vmin(data).
+ vmax : float | callable
+            The value specifying the upper bound of the color range.
+ If None, the maximum absolute value is used. If vmin is None,
+ but vmax is not, defaults to np.min(data).
+ If callable, the output equals vmax(data).
+ cmap : matplotlib colormap
+ Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
+ 'Reds'.
+ sensors : bool | str
+ Add markers for sensor locations to the plot. Accepts matplotlib
+ plot format string (e.g., 'r+' for red plusses). If True, a circle
+ will be used (via .add_artist). Defaults to True.
+ colorbar : bool
+ Plot a colorbar.
+ unit : dict | str | None
+ The unit of the channel type used for colorbar label. If
+ scale is None the unit is automatically determined.
+ res : int
+ The resolution of the topomap image (n pixels along each side).
+ size : float
+ Side length per topomap in inches.
+ cbar_fmt : str
+ String format for colorbar values.
+ show_names : bool | callable
+ If True, show channel names on top of the map. If a callable is
+ passed, channel names will be formatted using the callable; e.g.,
+ to delete the prefix 'MEG ' from all channel names, pass the
+ function lambda x: x.replace('MEG ', ''). If `mask` is not None,
+ only significant sensors will be shown.
+ title : str | None
+ Title. If None (default), no title is displayed.
+ axes : instance of Axes | None
+ The axes to plot to. If None the axes is defined automatically.
+ show : bool
+ Call pyplot.show() at the end.
+ outlines : 'head' | dict | None
+ The outlines to be drawn. If 'head', a head scheme will be drawn.
+ If dict, each key refers to a tuple of x and y positions.
+ The values in 'mask_pos' will serve as image mask. If None, nothing
+ will be drawn. Defaults to 'head'. If dict, the 'autoshrink' (bool)
+ field will trigger automated shrinking of the positions due to
+ points outside the outline. Moreover, a matplotlib patch object can
+ be passed for advanced masking options, either directly or as a
+ function that returns patches (required for multi-axis plots).
+ head_pos : dict | None
+ If None (default), the sensors are positioned such that they span
+ the head circle. If dict, can have entries 'center' (tuple) and
+ 'scale' (tuple) for what the center and scale of the head should be
+ relative to the electrode locations.
+
+ Returns
+ -------
+ fig : matplotlib.figure.Figure
+ The figure containing the topography.
+ """
+ from ..viz import plot_tfr_topomap
+ return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
+ fmax=fmax, ch_type=ch_type, baseline=baseline,
+ mode=mode, layout=layout, vmin=vmin, vmax=vmax,
+ cmap=cmap, sensors=sensors, colorbar=colorbar,
+ unit=unit, res=res, size=size,
+ cbar_fmt=cbar_fmt, show_names=show_names,
+ title=title, axes=axes, show=show,
+ outlines=outlines, head_pos=head_pos)
+
+ def save(self, fname, overwrite=False):
+ """Save TFR object to hdf5 file
+
+ Parameters
+ ----------
+ fname : str
+            The file name, which should end with -tfr.h5.
+        overwrite : bool
+            If True, overwrite file (if it exists). Defaults to False.
+ """
+ write_tfrs(fname, self, overwrite=overwrite)
+
+
+def _prepare_write_tfr(tfr, condition):
+ """Aux function"""
+ return (condition, dict(times=tfr.times, freqs=tfr.freqs,
+ data=tfr.data, info=tfr.info, nave=tfr.nave,
+ comment=tfr.comment, method=tfr.method))
+
+
+def write_tfrs(fname, tfr, overwrite=False):
+ """Write a TFR dataset to hdf5.
+
+ Parameters
+ ----------
+ fname : string
+ The file name, which should end with -tfr.h5
+ tfr : AverageTFR instance, or list of AverageTFR instances
+ The TFR dataset, or list of TFR datasets, to save in one file.
+        Note. If .comment is None, a name will be generated on the fly,
+        based on the order in which the TFR objects are passed.
+ overwrite : bool
+ If True, overwrite file (if it exists). Defaults to False.
+
+ See Also
+ --------
+ read_tfrs
+
+ Notes
+ -----
+ .. versionadded:: 0.9.0
+ """
+ out = []
+ if not isinstance(tfr, (list, tuple)):
+ tfr = [tfr]
+ for ii, tfr_ in enumerate(tfr):
+ comment = ii if tfr_.comment is None else tfr_.comment
+ out.append(_prepare_write_tfr(tfr_, condition=comment))
+ write_hdf5(fname, out, overwrite=overwrite)
+
+
+def read_tfrs(fname, condition=None):
+ """
+ Read TFR datasets from hdf5 file.
+
+ Parameters
+ ----------
+ fname : string
+        The file name, which should end with -tfr.h5.
+ condition : int or str | list of int or str | None
+ The condition to load. If None, all conditions will be returned.
+ Defaults to None.
+
+ See Also
+ --------
+ write_tfrs
+
+ Returns
+ -------
+ tfrs : list of instances of AverageTFR | instance of AverageTFR
+ Depending on `condition` either the TFR object or a list of multiple
+ TFR objects.
+
+ Notes
+ -----
+ .. versionadded:: 0.9.0
+ """
+
+ check_fname(fname, 'tfr', ('-tfr.h5',))
+
+ logger.info('Reading %s ...' % fname)
+ tfr_data = read_hdf5(fname)
+ if condition is not None:
+ tfr_dict = dict(tfr_data)
+ if condition not in tfr_dict:
+ keys = ['%s' % k for k in tfr_dict]
+ raise ValueError('Cannot find condition ("{0}") in this file. '
+                             'I can give you "{1}".'
+ .format(condition, " or ".join(keys)))
+ out = AverageTFR(**tfr_dict[condition])
+ else:
+ out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
+ return out
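+
+# Hedged round-trip sketch: save an AverageTFR under a condition label and
+# read it back. Assumes `tfr` is an existing AverageTFR instance; the file
+# name is illustrative only.
+def _example_tfr_io(tfr):
+    tfr.comment = 'audio'
+    write_tfrs('example-tfr.h5', [tfr], overwrite=True)
+    return read_tfrs('example-tfr.h5', condition='audio')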
+
+
+def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
+ return_itc=True, decim=1, n_jobs=1):
+ """Compute Time-Frequency Representation (TFR) using Morlet wavelets
+
+ Parameters
+ ----------
+ inst : Epochs | Evoked
+ The epochs or evoked object.
+ freqs : ndarray, shape (n_freqs,)
+ The frequencies in Hz.
+ n_cycles : float | ndarray, shape (n_freqs,)
+ The number of cycles globally or for each frequency.
+ use_fft : bool
+        Whether to use FFT-based convolution.
+ return_itc : bool
+ Return intertrial coherence (ITC) as well as averaged power.
+ Must be ``False`` for evoked data.
+ decim : int
+ The decimation factor on the time axis. To reduce memory usage.
+ n_jobs : int
+ The number of jobs to run in parallel.
+
+ Returns
+ -------
+ power : instance of AverageTFR
+ The averaged power.
+ itc : instance of AverageTFR
+ The intertrial coherence (ITC). Only returned if return_itc
+ is True.
+
+ See Also
+ --------
+ tfr_multitaper, tfr_stockwell
+ """
+ data = _get_data(inst, return_itc)
+ picks = pick_types(inst.info, meg=True, eeg=True)
+ info = pick_info(inst.info, picks)
+ data = data[:, picks, :]
+ power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
+ frequencies=freqs,
+ n_cycles=n_cycles, n_jobs=n_jobs,
+ use_fft=use_fft, decim=decim,
+ zero_mean=True)
+ times = inst.times[::decim].copy()
+ nave = len(data)
+ out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
+ if return_itc:
+ out = (out, AverageTFR(info, itc, times, freqs, nave,
+ method='morlet-itc'))
+ return out
+
+
+@verbose
+def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
+ use_fft=True, n_cycles=7, decim=1, n_jobs=1,
+ zero_mean=True, verbose=None):
+ """Compute time induced power and inter-trial phase-locking factor
+
+ The time frequency decomposition is done with DPSS wavelets
+
+ Parameters
+ ----------
+ data : np.ndarray, shape (n_epochs, n_channels, n_times)
+ The input data.
+ sfreq : float
+ sampling Frequency
+ frequencies : np.ndarray, shape (n_frequencies,)
+ Array of frequencies of interest
+ time_bandwidth : float
+ Time x (Full) Bandwidth product.
+ The number of good tapers (low-bias) is chosen automatically based on
+ this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
+ use_fft : bool
+ Compute transform with fft based convolutions or temporal
+ convolutions. Defaults to True.
+ n_cycles : float | np.ndarray shape (n_frequencies,)
+ Number of cycles. Fixed number or one per frequency. Defaults to 7.
+    decim : int
+ Temporal decimation factor. Defaults to 1.
+ n_jobs : int
+        The number of CPUs used in parallel. If -1, all CPUs are used.
+        Requires the joblib package. Defaults to 1.
+ zero_mean : bool
+ Make sure the wavelets are zero mean. Defaults to True.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ power : np.ndarray, shape (n_channels, n_frequencies, n_times)
+ Induced power. Squared amplitude of time-frequency coefficients.
+ itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
+ Phase locking value.
+ """
+ n_epochs, n_channels, n_times = data[:, :, ::decim].shape
+ logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
+ n_frequencies = len(frequencies)
+ logger.info('Multitaper time-frequency analysis for %d frequencies',
+ n_frequencies)
+
+ # Precompute wavelets for given frequency range to save time
+ Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
+ time_bandwidth=time_bandwidth, zero_mean=zero_mean)
+ n_taps = len(Ws)
+ logger.info('Using %d tapers', n_taps)
+ n_times_wavelets = Ws[0][0].shape[0]
+ if n_times <= n_times_wavelets:
+ warnings.warn("Time windows are as long or longer than the epoch. "
+ "Consider reducing n_cycles.")
+ psd = np.zeros((n_channels, n_frequencies, n_times))
+ itc = np.zeros((n_channels, n_frequencies, n_times))
+ parallel, my_time_frequency, _ = parallel_func(_time_frequency,
+ n_jobs)
+ for m in range(n_taps):
+ psd_itc = parallel(my_time_frequency(data[:, c, :],
+ Ws[m], use_fft, decim)
+ for c in range(n_channels))
+ for c, (psd_c, itc_c) in enumerate(psd_itc):
+ psd[c, :, :] += psd_c
+ itc[c, :, :] += itc_c
+ psd /= n_taps
+ itc /= n_taps
+ return psd, itc
+
+
+def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
+ return_itc=True, decim=1, n_jobs=1):
+ """Compute Time-Frequency Representation (TFR) using DPSS wavelets
+
+ Parameters
+ ----------
+ inst : Epochs | Evoked
+ The epochs or evoked object.
+ freqs : ndarray, shape (n_freqs,)
+ The frequencies in Hz.
+ n_cycles : float | ndarray, shape (n_freqs,)
+ The number of cycles globally or for each frequency.
+ The time-window length is thus T = n_cycles / freq.
+ time_bandwidth : float, (optional)
+ Time x (Full) Bandwidth product. Should be >= 2.0.
+ Choose this along with n_cycles to get desired frequency resolution.
+ The number of good tapers (least leakage from far away frequencies)
+ is chosen automatically based on this to floor(time_bandwidth - 1).
+ Default is 4.0 (3 good tapers).
+        E.g., with freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
+ If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
+ use_fft : bool
+        Whether to use FFT-based convolution.
+        Defaults to True.
+ return_itc : bool
+ Return intertrial coherence (ITC) as well as averaged power.
+ Defaults to True.
+ decim : int
+ The decimation factor on the time axis. To reduce memory usage.
+        Note that this is brute-force decimation, no anti-aliasing is done.
+ Defaults to 1.
+ n_jobs : int
+ The number of jobs to run in parallel. Defaults to 1.
+
+ Returns
+ -------
+ power : AverageTFR
+ The averaged power.
+ itc : AverageTFR
+ The intertrial coherence (ITC). Only returned if return_itc
+ is True.
+
+ See Also
+ --------
+    tfr_morlet, tfr_stockwell
+
+ Notes
+ -----
+ .. versionadded:: 0.9.0
+ """
+
+ data = _get_data(inst, return_itc)
+ picks = pick_types(inst.info, meg=True, eeg=True)
+ info = pick_info(inst.info, picks)
+ data = data[:, picks, :]
+ power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
+ frequencies=freqs, n_cycles=n_cycles,
+ time_bandwidth=time_bandwidth,
+ use_fft=use_fft, decim=decim,
+ n_jobs=n_jobs, zero_mean=True,
+ verbose='INFO')
+ times = inst.times[::decim].copy()
+ nave = len(data)
+ out = AverageTFR(info, power, times, freqs, nave,
+                     method='multitaper-power')
+ if return_itc:
+ out = (out, AverageTFR(info, itc, times, freqs, nave,
+                               method='multitaper-itc'))
+ return out
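+
+# Hedged usage sketch for the public entry points above. Assumes `epochs` is
+# an existing mne Epochs instance; all names and values are illustrative.
+# power, itc = tfr_multitaper(epochs, freqs=np.arange(6., 30., 3.),
+#                             n_cycles=4., time_bandwidth=4.0, decim=3)
+# power.plot_topo(baseline=(-0.5, 0), mode='zscore', title='Multitaper power')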
+
+# -*- coding: utf-8 -*-
+#
+# Debug/Helper script for CSV stylesheet development
+#
+# >>> python csv2xml <CSV File>
+# ... converts the CSV file into XML
+#
+# >>> python csv2xml <CSV File> <XSLT Stylesheet>
+# ... converts the CSV file into XML and transforms it using the stylesheet
+#
+
+import csv
+import sys
+
+from lxml import etree
+from xml.sax.saxutils import escape, unescape
+
+TABLE = "table"
+ROW = "row"
+COL = "col"
+FIELD = "field"
+TAG = "tag"
+HASHTAG = "hashtag"
+
+# -----------------------------------------------------------------------------
+def xml_encode(s):
+
+ if s:
+        s = escape(s, {"'": "&apos;", '"': "&quot;"})
+ return s
+
+# -----------------------------------------------------------------------------
+def xml_decode(s):
+
+ if s:
+        s = unescape(s, {"&apos;": "'", "&quot;": '"'})
+ return s
+
+# -----------------------------------------------------------------------------
+def parse(source):
+
+ parser = etree.XMLParser(no_network=False)
+ result = etree.parse(source, parser)
+ return result
+
+# -----------------------------------------------------------------------------
+def s3_unicode(s, encoding="utf-8"):
+ """
+    Convert an object into a unicode instance, to be used instead of
+ unicode(s) (Note: user data should never be converted into str).
+
+ @param s: the object
+ @param encoding: the character encoding
+ """
+
+ if type(s) is unicode:
+ return s
+ try:
+ if not isinstance(s, basestring):
+ if hasattr(s, "__unicode__"):
+ s = unicode(s)
+ else:
+ try:
+ s = unicode(str(s), encoding, "strict")
+ except UnicodeEncodeError:
+ if not isinstance(s, Exception):
+ raise
+ s = " ".join([s3_unicode(arg, encoding) for arg in s])
+ else:
+ s = s.decode(encoding)
+ except UnicodeDecodeError:
+ if not isinstance(s, Exception):
+ raise
+ else:
+ s = " ".join([s3_unicode(arg, encoding) for arg in s])
+ return s
+
+# -------------------------------------------------------------------------
+def csv2tree(source,
+ delimiter=",",
+ quotechar='"'):
+
+ # Increase field size to be able to import WKTs
+ csv.field_size_limit(2**20 * 100) # 100 megs
+
+ # Shortcuts
+ SubElement = etree.SubElement
+
+ root = etree.Element(TABLE)
+
+ def add_col(row, key, value, hashtags=None):
+
+ col = SubElement(row, COL)
+ col.set(FIELD, s3_unicode(key))
+ if hashtags:
+ hashtag = hashtags.get(key)
+ if hashtag and hashtag[1:]:
+ col.set(HASHTAG, hashtag)
+ if value:
+ text = s3_unicode(value).strip()
+            if text[:6].lower() not in ("null", "<null>"):
+ col.text = text
+ else:
+ col.text = ""
+
+ def utf_8_encode(source):
+
+ encodings = ["utf-8-sig", "iso-8859-1"]
+ e = encodings[0]
+ for line in source:
+ if e:
+ try:
+ yield unicode(line, e, "strict").encode("utf-8")
+ except:
+ pass
+ else:
+ continue
+ for encoding in encodings:
+ try:
+ yield unicode(line, encoding, "strict").encode("utf-8")
+ except:
+ continue
+ else:
+ e = encoding
+ break
+
+ hashtags = {}
+
+ import StringIO
+ if not isinstance(source, StringIO.StringIO):
+ source = utf_8_encode(source)
+ reader = csv.DictReader(source,
+ delimiter=delimiter,
+ quotechar=quotechar)
+
+ for i, r in enumerate(reader):
+ # Skip empty rows
+ if not any(r.values()):
+ continue
+ if i == 0:
+ # Auto-detect hashtags
+ items = {}
+ for k, v in r.items():
+ if v:
+ try:
+ v = v.strip()
+ except AttributeError: # v is a List
+ v = s3_unicode(v)
+ items[k] = v
+ if all(v[0] == '#' for v in items.values()):
+ hashtags.update(items)
+ continue
+ row = SubElement(root, ROW)
+ for k in r:
+ add_col(row, k, r[k], hashtags=hashtags)
+
+ return etree.ElementTree(root)
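+
+# -----------------------------------------------------------------------------
+# Hedged demo (Python 2, matching this script): build a tree from an in-memory
+# CSV. A StringIO source bypasses the utf_8_encode() re-encoding step above.
+def example_csv2tree():
+    import StringIO
+    source = StringIO.StringIO("name,value\r\nfoo,1\r\nbar,2\r\n")
+    return etree.tostring(csv2tree(source), pretty_print=True)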
+
+# -----------------------------------------------------------------------------
+def transform(tree, stylesheet_path, **args):
+
+ if args:
+ _args = [(k, "'%s'" % args[k]) for k in args]
+ _args = dict(_args)
+ else:
+ _args = None
+ stylesheet = etree.parse(stylesheet_path)
+
+ ac = etree.XSLTAccessControl(read_file=True, read_network=True)
+ transformer = etree.XSLT(stylesheet, access_control=ac)
+ if _args:
+ result = transformer(tree, **_args)
+ else:
+ result = transformer(tree)
+ return result
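+
+# Illustrative call (path and parameter name assumed): string parameters
+# must reach XSLT quoted, which is why each value is wrapped in quotes
+# above, e.g.
+#
+# result = transform(tree, "styles/import.xsl", mode="import")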
+
+# -----------------------------------------------------------------------------
+def main(argv):
+
+ try:
+ csvpath = argv[0]
+ except:
+ print "Usage: python csv2xml <csv-path> [<xsl-path>]"
+ return
+ try:
+ xslpath = argv[1]
+ except:
+ xslpath = None
+
+ csvfile = open(csvpath)
+ tree = csv2tree(csvfile)
+
+ if xslpath is not None:
+ tree = transform(tree, xslpath)
+
+ print etree.tostring(tree, pretty_print=True)
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
+
+# END =========================================================================
+
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+import pytest
+
+from case import Mock
+
+from kombu.asynchronous.aws.sqs.message import AsyncMessage
+from kombu.asynchronous.aws.sqs.queue import AsyncQueue
+
+from t.mocks import PromiseMock
+
+from ..case import AWSCase
+
+
+class test_AsyncQueue(AWSCase):
+
+ def setup(self):
+ self.conn = Mock(name='connection')
+ self.x = AsyncQueue(self.conn, '/url')
+ self.callback = PromiseMock(name='callback')
+
+ def test_message_class(self):
+ assert issubclass(self.x.message_class, AsyncMessage)
+
+ def test_get_attributes(self):
+ self.x.get_attributes(attributes='QueueSize', callback=self.callback)
+ self.x.connection.get_queue_attributes.assert_called_with(
+ self.x, 'QueueSize', self.callback,
+ )
+
+ def test_set_attribute(self):
+ self.x.set_attribute('key', 'value', callback=self.callback)
+ self.x.connection.set_queue_attribute.assert_called_with(
+ self.x, 'key', 'value', self.callback,
+ )
+
+ def test_get_timeout(self):
+ self.x.get_timeout(callback=self.callback)
+ self.x.connection.get_queue_attributes.assert_called()
+ on_ready = self.x.connection.get_queue_attributes.call_args[0][2]
+ self.x.connection.get_queue_attributes.assert_called_with(
+ self.x, 'VisibilityTimeout', on_ready,
+ )
+
+ on_ready({'VisibilityTimeout': '303'})
+ self.callback.assert_called_with(303)
+
+ def test_set_timeout(self):
+ self.x.set_timeout(808, callback=self.callback)
+ self.x.connection.set_queue_attribute.assert_called()
+ on_ready = self.x.connection.set_queue_attribute.call_args[0][3]
+ self.x.connection.set_queue_attribute.assert_called_with(
+ self.x, 'VisibilityTimeout', 808, on_ready,
+ )
+ on_ready(808)
+ self.callback.assert_called_with(808)
+ assert self.x.visibility_timeout == 808
+
+ on_ready(None)
+ assert self.x.visibility_timeout == 808
+
+ def test_add_permission(self):
+ self.x.add_permission(
+ 'label', 'accid', 'action', callback=self.callback,
+ )
+ self.x.connection.add_permission.assert_called_with(
+ self.x, 'label', 'accid', 'action', self.callback,
+ )
+
+ def test_remove_permission(self):
+ self.x.remove_permission('label', callback=self.callback)
+ self.x.connection.remove_permission.assert_called_with(
+ self.x, 'label', self.callback,
+ )
+
+ def test_read(self):
+ self.x.read(visibility_timeout=909, callback=self.callback)
+ self.x.connection.receive_message.assert_called()
+ on_ready = self.x.connection.receive_message.call_args[1]['callback']
+ self.x.connection.receive_message.assert_called_with(
+ self.x, number_messages=1, visibility_timeout=909,
+ attributes=None, wait_time_seconds=None, callback=on_ready,
+ )
+
+ messages = [Mock(name='message1')]
+ on_ready(messages)
+
+ self.callback.assert_called_with(messages[0])
+
+ def MockMessage(self, id, md5):
+ m = Mock(name='Message-{0}'.format(id))
+ m.id = id
+ m.md5 = md5
+ return m
+
+ def test_write(self):
+ message = self.MockMessage('id1', 'digest1')
+ self.x.write(message, delay_seconds=303, callback=self.callback)
+ self.x.connection.send_message.assert_called()
+ on_ready = self.x.connection.send_message.call_args[1]['callback']
+ self.x.connection.send_message.assert_called_with(
+ self.x, message.get_body_encoded(), 303,
+ callback=on_ready,
+ )
+
+ new_message = self.MockMessage('id2', 'digest2')
+ on_ready(new_message)
+ assert message.id == 'id2'
+ assert message.md5 == 'digest2'
+
+ def test_write_batch(self):
+ messages = [('id1', 'A', 0), ('id2', 'B', 303)]
+ self.x.write_batch(messages, callback=self.callback)
+ self.x.connection.send_message_batch.assert_called_with(
+ self.x, messages, callback=self.callback,
+ )
+
+ def test_delete_message(self):
+ message = self.MockMessage('id1', 'digest1')
+ self.x.delete_message(message, callback=self.callback)
+ self.x.connection.delete_message.assert_called_with(
+ self.x, message, self.callback,
+ )
+
+ def test_delete_message_batch(self):
+ messages = [
+ self.MockMessage('id1', 'r1'),
+ self.MockMessage('id2', 'r2'),
+ ]
+ self.x.delete_message_batch(messages, callback=self.callback)
+ self.x.connection.delete_message_batch.assert_called_with(
+ self.x, messages, callback=self.callback,
+ )
+
+ def test_change_message_visibility_batch(self):
+ messages = [
+ (self.MockMessage('id1', 'r1'), 303),
+ (self.MockMessage('id2', 'r2'), 909),
+ ]
+ self.x.change_message_visibility_batch(
+ messages, callback=self.callback,
+ )
+ self.x.connection.change_message_visibility_batch.assert_called_with(
+ self.x, messages, callback=self.callback,
+ )
+
+ def test_delete(self):
+ self.x.delete(callback=self.callback)
+ self.x.connection.delete_queue.assert_called_with(
+ self.x, callback=self.callback,
+ )
+
+ def test_count(self):
+ self.x.count(callback=self.callback)
+ self.x.connection.get_queue_attributes.assert_called()
+ on_ready = self.x.connection.get_queue_attributes.call_args[0][2]
+ self.x.connection.get_queue_attributes.assert_called_with(
+ self.x, 'ApproximateNumberOfMessages', on_ready,
+ )
+
+ on_ready({'ApproximateNumberOfMessages': '909'})
+ self.callback.assert_called_with(909)
+
+ def test_interface__count_slow(self):
+ with pytest.raises(NotImplementedError):
+ self.x.count_slow()
+
+ def test_interface__dump(self):
+ with pytest.raises(NotImplementedError):
+ self.x.dump()
+
+ def test_interface__save_to_file(self):
+ with pytest.raises(NotImplementedError):
+ self.x.save_to_file()
+
+ def test_interface__save_to_filename(self):
+ with pytest.raises(NotImplementedError):
+ self.x.save_to_filename()
+
+ def test_interface__save(self):
+ with pytest.raises(NotImplementedError):
+ self.x.save()
+
+ def test_interface__save_to_s3(self):
+ with pytest.raises(NotImplementedError):
+ self.x.save_to_s3()
+
+ def test_interface__load_from_s3(self):
+ with pytest.raises(NotImplementedError):
+ self.x.load_from_s3()
+
+ def test_interface__load_from_file(self):
+ with pytest.raises(NotImplementedError):
+ self.x.load_from_file()
+
+ def test_interface__load_from_filename(self):
+ with pytest.raises(NotImplementedError):
+ self.x.load_from_filename()
+
+ def test_interface__load(self):
+ with pytest.raises(NotImplementedError):
+ self.x.load()
+
+ def test_interface__clear(self):
+ with pytest.raises(NotImplementedError):
+ self.x.clear()
+
+'''
+Script to generate Kivy API from source code.
+
+Code is messy, but working.
+Be careful if you change anything in it!
+
+'''
+
+ignore_list = (
+ 'kivy._clock',
+ 'kivy._event',
+ 'kivy.factory_registers',
+ 'kivy.graphics.buffer',
+ 'kivy.graphics.vbo',
+ 'kivy.graphics.vertex',
+ 'kivy.uix.recycleview.__init__',
+ 'kivy.setupconfig',
+ 'kivy.version'
+)
+
+import os
+import sys
+from glob import glob
+
+import kivy
+
+# force loading of kivy modules
+import kivy.app
+import kivy.metrics
+import kivy.atlas
+import kivy.context
+import kivy.core.audio
+import kivy.core.camera
+import kivy.core.clipboard
+import kivy.core.gl
+import kivy.core.image
+import kivy.core.spelling
+import kivy.core.text
+import kivy.core.text.markup
+import kivy.core.video
+import kivy.core.window
+import kivy.geometry
+import kivy.graphics
+import kivy.graphics.shader
+import kivy.graphics.tesselator
+import kivy.animation
+import kivy.modules.console
+import kivy.modules.keybinding
+import kivy.modules.monitor
+import kivy.modules.touchring
+import kivy.modules.inspector
+import kivy.modules.recorder
+import kivy.modules.screen
+import kivy.storage
+import kivy.storage.dictstore
+import kivy.storage.jsonstore
+import kivy.storage.redisstore
+import kivy.network.urlrequest
+import kivy.modules.webdebugger
+import kivy.support
+import kivy.tools.packaging.pyinstaller_hooks
+import kivy.input.recorder
+import kivy.interactive
+import kivy.garden
+from kivy.factory import Factory
+from kivy.lib import osc, ddsfile, mtdev
+
+# check for silenced build
+BE_QUIET = True
+if os.environ.get('BE_QUIET') == 'False':
+ BE_QUIET = False
+
+# force loading of all classes from factory
+for x in list(Factory.classes.keys())[:]:
+ getattr(Factory, x)
+
+# Directory of doc
+base_dir = os.path.dirname(__file__)
+dest_dir = os.path.join(base_dir, 'sources')
+examples_framework_dir = os.path.join(base_dir, '..', 'examples', 'framework')
+
+# Write a touch file to mark that autobuild has run
+base = 'autobuild.py-done'
+with open(os.path.join(base_dir, base), 'w') as f:
+ f.write('')
+
+
+def writefile(filename, data):
+ global dest_dir
+ # avoid rewriting the file if the content didn't change
+ f = os.path.join(dest_dir, filename)
+ if not BE_QUIET:
+ print('write', filename)
+ if os.path.exists(f):
+ with open(f) as fd:
+ if fd.read() == data:
+ return
+ h = open(f, 'w')
+ h.write(data)
+ h.close()
+
+
+# Activate Kivy modules
+'''
+for k in kivy.kivy_modules.list().keys():
+ kivy.kivy_modules.import_module(k)
+'''
+
+
+# Search all kivy modules
+l = [(x, sys.modules[x],
+ os.path.basename(sys.modules[x].__file__).rsplit('.', 1)[0])
+ for x in sys.modules if x.startswith('kivy') and sys.modules[x]]
+
+
+# Extract packages from modules
+packages = []
+modules = {}
+api_modules = []
+for name, module, filename in l:
+ if name in ignore_list:
+ continue
+ if not any([name.startswith(x) for x in ignore_list]):
+ api_modules.append(name)
+ if filename == '__init__':
+ packages.append(name)
+ else:
+ if hasattr(module, '__all__'):
+ modules[name] = module.__all__
+ else:
+ modules[name] = [x for x in dir(module) if not x.startswith('__')]
+
+packages.sort()
+
+# Create index
+api_index = '''API Reference
+-------------
+
+The API reference is a lexicographic list of all the different classes,
+methods and features that Kivy offers.
+
+.. toctree::
+ :maxdepth: 1
+
+'''
+api_modules.sort()
+for package in api_modules:
+ api_index += " api-%s.rst\n" % package
+
+writefile('api-index.rst', api_index)
+
+
+# Create index for all packages
+# Note on displaying inherited members;
+# Adding the directive ':inherited-members:' to automodule achieves this
+# but is not always desired. Please see
+# https://github.com/kivy/kivy/pull/3870
+
+template = '\n'.join((
+ '=' * 100,
+ '$SUMMARY',
+ '=' * 100,
+ '''
+$EXAMPLES_REF
+
+.. automodule:: $PACKAGE
+ :members:
+ :show-inheritance:
+
+.. toctree::
+
+$EXAMPLES
+'''))
+
+
+template_examples = '''.. _example-reference%d:
+
+Examples
+--------
+
+%s
+'''
+
+template_examples_ref = ('# :ref:`Jump directly to Examples'
+ ' <example-reference%d>`')
+
+
+def extract_summary_line(doc):
+ """
+ :param doc: the __doc__ field of a module
+ :return: a doc string suitable for a header or empty string
+ """
+ if doc is None:
+ return ''
+ for line in doc.split('\n'):
+ line = line.strip()
+ # don't take empty line
+ if len(line) < 1:
+ continue
+ # ref mark
+ if line.startswith('.. _'):
+ continue
+ return line
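+
+# Illustrative behaviour (docstring assumed): the first non-empty line
+# that is not a ref mark is returned, e.g.
+#
+# extract_summary_line('.. _anim:\n\nAnimation\n=========') # -> 'Animation'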
+
+for package in packages:
+ summary = extract_summary_line(sys.modules[package].__doc__)
+ if summary is None or summary == '':
+ summary = 'NO DOCUMENTATION (package %s)' % package
+ t = template.replace('$SUMMARY', summary)
+ t = t.replace('$PACKAGE', package)
+ t = t.replace('$EXAMPLES_REF', '')
+ t = t.replace('$EXAMPLES', '')
+
+ # search packages
+ for subpackage in packages:
+ packagemodule = subpackage.rsplit('.', 1)[0]
+ if packagemodule != package or len(subpackage.split('.')) <= 2:
+ continue
+ t += " api-%s.rst\n" % subpackage
+
+ # search modules
+ m = list(modules.keys())
+ m.sort(key=lambda x: extract_summary_line(sys.modules[x].__doc__).upper())
+ for module in m:
+ packagemodule = module.rsplit('.', 1)[0]
+ if packagemodule != package:
+ continue
+ t += " api-%s.rst\n" % module
+
+ writefile('api-%s.rst' % package, t)
+
+
+# Create index for all module
+m = list(modules.keys())
+m.sort()
+refid = 0
+for module in m:
+ summary = extract_summary_line(sys.modules[module].__doc__)
+ if summary is None or summary == '':
+ summary = 'NO DOCUMENTATION (module %s)' % module
+
+ # search examples
+ example_output = []
+ example_prefix = module
+ if module.startswith('kivy.'):
+ example_prefix = module[5:]
+ example_prefix = example_prefix.replace('.', '_')
+
+ # try to find an example in the framework directory
+ list_examples = glob('%s*.py' % os.path.join(
+ examples_framework_dir, example_prefix))
+ for x in list_examples:
+ # extract filename without directory
+ xb = os.path.basename(x)
+
+ # add a section !
+ example_output.append('File :download:`%s <%s>` ::' % (
+ xb, os.path.join('..', x)))
+
+ # put the file in
+ with open(x, 'r') as fd:
+ d = fd.read().strip()
+ d = '\t' + '\n\t'.join(d.split('\n'))
+ example_output.append(d)
+
+ t = template.replace('$SUMMARY', summary)
+ t = t.replace('$PACKAGE', module)
+ if len(example_output):
+ refid += 1
+ example_output = template_examples % (
+ refid, '\n\n\n'.join(example_output))
+ t = t.replace('$EXAMPLES_REF', template_examples_ref % refid)
+ t = t.replace('$EXAMPLES', example_output)
+ else:
+ t = t.replace('$EXAMPLES_REF', '')
+ t = t.replace('$EXAMPLES', '')
+ writefile('api-%s.rst' % module, t)
+
+
+# Generation finished
+print('Auto-generation finished')
+
+
+# (c) Crown Copyright 2014 Defence Science and Technology Laboratory UK
+# Author: Rich Brantingham
+
+import copy
+import time
+import json
+import urlparse
+import datetime
+from xml.dom.minidom import parseString
+from xml.parsers.expat import ExpatError
+
+from django.test import TestCase
+from django.core import urlresolvers
+from django.test import client
+from django.conf import settings
+from django.core.urlresolvers import reverse
+from django.contrib.auth.models import User
+
+from registration.models import RegistrationProfile
+from tastypie_mongoengine import test_runner
+
+import projectsapp.documents as documents
+from projectsapp import api
+from projectsapp import api_functions
+
+class Test_Authentication_Base(test_runner.MongoEngineTestCase):
+ """
+ Base class to handle functions common throughout tests
+ """
+
+ api_name = 'v1'
+ c = client.Client()
+
+ def get_meta_and_objects(self, response):
+ content = json.loads(response.content)
+ return content['meta'], content['objects']
+
+ """ User Handling Functions """
+ def resourceListURI(self, resource_name):
+ return urlresolvers.reverse('api_dispatch_list', kwargs={'api_name': self.api_name, 'resource_name': resource_name})
+
+ def resourcePK(self, resource_uri):
+ match = urlresolvers.resolve(resource_uri)
+ return match.kwargs['pk']
+
+ def resourceDetailURI(self, resource_name, resource_pk):
+ return urlresolvers.reverse('api_dispatch_detail', kwargs={'api_name': self.api_name, 'resource_name': resource_name, 'pk': resource_pk})
+
+ def fullURItoAbsoluteURI(self, uri):
+ scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
+ return urlparse.urlunsplit((None, None, path, query, fragment))
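+
+ # Illustrative behaviour (URI assumed):
+ # fullURItoAbsoluteURI('http://testserver/api/v1/project/abc123/')
+ # # -> '/api/v1/project/abc123/'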
+
+ def add_user(self, email=None, first_name=None, last_name=None):
+ """ Add users
+ All 3 arguments are optional. """
+
+ # Allow ability to add an email from tests
+ if email==None:
+ email = 'bob@example.com'
+ if first_name==None:
+ first_name = 'bob'
+ if last_name==None:
+ last_name = 'roberts'
+
+ # Register a new user
+ resp = self.c.post(reverse('registration_register'),
+ data={'email': email,
+ 'first_name' : first_name, 'last_name' : last_name,
+ 'organisation' : 'org', 'team' : 'team',
+ 'password1': 'test_password', 'password2': 'test_password',
+ 'tos': True})
+
+ # Get the profile of our new user to access the ACTIVATION key
+ profile = RegistrationProfile.objects.get(user__email=email)
+
+ # And now activate the profile using the activation key
+ resp = self.client.get(reverse('registration_activate',
+ args=(),
+ kwargs={'activation_key': profile.activation_key}))
+
+ # Give all other tests access to the user and API key
+ user = User.objects.get(email=email)
+ api_key = user.api_key.key
+
+ return user, api_key
+
+ def build_headers(self, user, api_key):
+ """ Build request headers for calls requiring authentication """
+
+ headers={"HTTP_AUTHORIZATION":"ApiKey %s:%s"%(user.username, api_key)}
+ return headers
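+
+ # Illustrative result (username and key values assumed):
+ # {"HTTP_AUTHORIZATION": "ApiKey bob:204db7bcfafb2deb7506b89eb3b9b715"}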
+
+ def give_privileges(self, user, priv):
+ """ makes the user superuser | staff """
+
+ if priv.lower() == 'staff':
+ user.is_staff = True
+ elif priv.lower() == 'superuser':
+ user.is_superuser = True
+ else:
+ print 'failed to set privileges (%s) for user %s' % (priv, user)
+
+ user.save()
+ return user
+
+#------------------------------------------------------------------------------------------------------------
+
+#@utils.override_settings(DEBUG=True)
+class Test_Basic_Authentication_Functions(Test_Authentication_Base):
+ """
+ Tests that clients can authenticate properly.
+ """
+
+ def setUp(self):
+
+ # Add a user and build API key header
+ self.user_id, self.api_key = self.add_user()
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+
+ def test_no_auth_required_on_GET(self):
+ """ Authentication block on a post request """
+
+ # Don't actually use the headers in the call
+ response = self.c.get(self.resourceListURI('project'))
+ if settings.ANONYMOUS_VIEWING == True:
+ self.assertEquals(response.status_code, 200)
+ else:
+ self.assertEquals(response.status_code, 401)
+
+ def test_auth_block_a_POST(self):
+ """ Authentication block on a post request """
+
+ # Don't actually use the headers in the call
+ data = {"title": "This project will never stick...",
+ "description": "First project description in here.",
+ "status":"published",
+ "protective_marking" : {"classification" : "public",
+ "descriptor" : "private"
+ }}
+
+ response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json')
+ self.assertEquals(response.status_code, 401)
+
+ def test_auth_block_a_non_staff_POST(self):
+ """ Authorization blocks a POST request by a non-staff user """
+
+ # Use the authenticated (non-staff) user's headers in the call
+ data = {"title": "This project will never stick...",
+ "description": "First project description in here.",
+ "status":"published",
+ "protective_marking" : {"classification" : "public",
+ "descriptor" : "private"
+ }}
+
+ response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 401)
+
+ def test_auth_allow_staff_POST(self):
+ """ Authorization allows POST by staff user """
+
+ user_id, api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(user_id, priv='staff')
+ headers = self.build_headers(user_id, api_key)
+
+ # Use the staff user's headers in the call
+ data = {"title": "This project will never stick...",
+ "description": "First project description in here.",
+ "status":"published",
+ "related_ideas":["xcxcxcxcxcxcxcxcxcxcxcxcxcxcx", "xcxcxcxcxcxcxcxcxcxcxcxcxcxcx"],
+ "protective_marking" : {"classification" : "public",
+ "descriptor" : "private"
+ }}
+
+ response = self.c.post(self.resourceListURI('project'), data=json.dumps(data), content_type='application/json', **headers)
+ self.assertEquals(response.status_code, 201)
+
+#------------------------------------------------------------------------------------------------------------
+
+#@utils.override_settings(DEBUG=True)
+class Test_Simple_GET_Project_API(Test_Authentication_Base):
+
+ def setUp(self):
+ """ Insert documents to start with"""
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ self.assertEquals(response.status_code, 200)
+
+ self.pm = {"classification" : "PUBLIC",
+ "classification_short" : "PU",
+ "classification_rank" : 0,
+ "national_caveats_primary_name" : "MY EYES ONLY",
+ "descriptor" : "private",
+ "codewords" : ["banana1","banana2"],
+ "codewords_short" : ["b1","b2"],
+ "national_caveats_members" : ["ME"],
+ "national_caveats_rank" : 3}
+
+ docs = [{"title": "The first project.",
+ "description": "First project description in here.",
+ "status":"published",
+ "protective_marking" : self.pm },
+ {"title": "The second project.",
+ "description": "Second project description in here.",
+ "status":"published",
+ "protective_marking" : self.pm }
+ ]
+
+ # Store the responses
+ self.doc_locations = []
+ for doc in docs:
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.doc_locations.append(response['location'])
+ self.assertEqual(response.status_code, 201)
+
+ def test_get_to_check_failure_anon(self):
+ """ Test to check that new status code isn't backwards breaking"""
+
+ url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
+ response = self.c.get(url)
+ self.assertEquals(response.status_code, 200)
+
+ def test_get_to_check_failure_authenticated(self):
+ """ Test to check that new status code isn't backwards breaking for authenticated user"""
+
+ url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
+ response = self.c.get(url, **self.headers)
+ self.assertEquals(response.status_code, 200)
+
+ def test_get_to_check_failure_authenticated_admin(self):
+ """ Test to check that new status code isn't backwards breaking for authenticated ADMIN user"""
+
+ user_id, api_key = self.add_user()
+ user = self.give_privileges(user_id, priv='staff')
+ headers = self.build_headers(user_id, api_key)
+
+ url = '/api/v1/project/?data_level=more&limit=3&offset=0&order_by=-created&status=published'
+ response = self.c.get(url, **headers)
+ self.assertEquals(response.status_code, 200)
+
+ def test_get_all_projects(self):
+ """ Retrieve all projects """
+
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ self.assertEquals(response.status_code, 200)
+ meta, content = self.get_meta_and_objects(response)
+ self.assertEquals(meta['total_count'], 2)
+ self.assertEquals(len(content), 2)
+
+ #TODO: Sort out xml tests
+
+ def test_get_xml_list(self):
+ """ Get an xml representation
+ This will ERROR rather than FAIL if it doesn't succeed."""
+
+ response = self.c.get('/api/%s/project/?format=xml'%(self.api_name), **self.headers)
+ self.assertEquals(response.status_code, 200)
+ xml = parseString(response.content)
+
+ def test_get_xml_list_fail(self):
+ """ Get an xml representation - fails on content """
+
+ response = self.c.get('/api/%s/project/?format=xml'%(self.api_name), **self.headers)
+ self.assertEquals(response.status_code, 200)
+ self.assertRaises(ExpatError, parseString, response.content + ' this')
+
+ def test_doc_back_count_sort_desc(self):
+ """ Sort results by back count in descending order. """
+
+ # Add some backs
+ for i in range(len(self.doc_ids)-1):
+ backs_uri = self.fullURItoAbsoluteURI(self.doc_ids[i]) + 'backs/'
+
+ # Add a different number of backs for each project
+ x = i + 1
+
+ for j in range(1, x+2):
+ # Add a different user each time
+ user_id, api_key = self.add_user(email='%s@blah.com'%(j))
+ headers = self.build_headers(user_id, api_key)
+
+ try:
+ resp = self.c.post(backs_uri, json.dumps({'comment':{"title":'cool %s'%(j)}}), content_type='application/json', **headers)
+ except Exception, e:
+ print e
+
+ response = self.c.get('/api/v1/project/?order_by=back_count', **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+
+ for x in range(len(objects)-1):
+ this_back_count = objects[x]['back_count']
+ next_back_count = objects[x+1]['back_count']
+ if this_back_count and next_back_count:
+ self.assertGreater(next_back_count, this_back_count)
+
+ ## MORE TESTS FOR DIFFERENT SORT FIELDS ##
+
+#@utils.override_settings(DEBUG=True)
+class Test_Check_Modified(Test_Authentication_Base):
+
+ def setUp(self):
+ """ Add in some documents"""
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+ number_projects = 3
+
+ # Insert the test projects, each with a few comments
+ for i in range(number_projects):
+ doc = {"title": "Project #%s"%(i), "description": "First project description in here.", "status": "published"}
+ #resp = self.c.get(self.resourceListURI('project'), **self.headers)
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ main_project = response['location']
+ self.assertEquals(response.status_code, 201)
+
+ comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
+
+ # Try adding new comments
+ for j in range (3):
+ new_comment = {"user" : "rich@rich.com",
+ "body" : "#%s perhaps we could extend that project by..."%(j),
+ "title" : "and what about adding to that project with %s..."%(j),
+ "protective_marking" : {"classification":"unclassified","descriptor":""}}
+ resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
+ self.assertEquals(resp.status_code, 201)
+
+ # Wait to ensure we have a different time
+ time.sleep(0.5)
+
+ # Check they went in OK
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+ self.assertEquals(len(content['objects']), number_projects)
+
+ def retrieve_most_recent_timestamp(self, objects):
+ """Gets the most recent timestamp"""
+
+ dates = []
+ for obj in objects:
+ dates += [datetime.datetime.strptime(obj['created'], '%Y-%m-%dT%H:%M:%S.%f'), datetime.datetime.strptime(obj['modified'], '%Y-%m-%dT%H:%M:%S.%f')]
+ return max(dates)
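+
+ # Illustrative behaviour (timestamps assumed): for an object with
+ # created='2014-01-01T00:00:00.000000' and
+ # modified='2014-01-02T12:00:00.000000' the later of the two,
+ # datetime(2014, 1, 2, 12, 0), is returned.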
+
+ def test_update_project_modified_ts_field_on_POST(self):
+ """ Checks that the Project API updates the modified timestamp field when part of the project is changed"""
+
+ # Get all data
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+ project_0 = content['objects'][0]
+ old_title = project_0['title']
+ old_ts = project_0['modified']
+
+ # Patch a new title - partial addition which leaves the rest in place correctly.
+ new_title = {"title":"this is a major change to the title because it was offensive..."}
+ response = self.c.patch(self.fullURItoAbsoluteURI(project_0['resource_uri']), json.dumps(new_title), content_type='application/json', **self.headers)
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+
+ # Retrieve the project again - it may have moved within the default ordering
+ project_0 = content['objects'][0]
+ new_stored_title = project_0['title']
+
+ # Check it's not the same as the previous one and is the intended new one
+ self.assertNotEqual(old_title, new_stored_title)
+ self.assertEqual(new_title['title'], new_stored_title)
+
+ # Check the timestamps
+ new_ts = project_0['modified']
+ self.assertGreater(new_ts, old_ts)
+
+ def test_update_project_modified_ts_field_on_POST_to_comment(self):
+ """ Checks that the Project API updates the modified timestamp field when part of the project is changed.
+ Mods to the comments/backs will change the overall objects modified date."""
+
+ # Get all data
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+ project_x = content['objects'][-1]
+ project_old_ts = project_x['modified']
+ first_comment = project_x['comments'][0]
+
+ # Patch a new title - partial addition which leaves the rest in place correctly.
+ new_comment_title = {"title":"this is a major change to the title because it was offensive..."}
+ response = self.c.patch(first_comment['resource_uri'], json.dumps(new_comment_title), content_type='application/json', **self.headers)
+ time.sleep(1)
+
+ # After a short sleep, get the projects again
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+ project_x = content['objects'][-1]
+
+ # Get the modified time for the Project
+ project_new_ts = project_x['modified']
+
+ # Get the first comment again
+ new_first_comment = project_x['comments'][0]
+
+ # Check that the new first comment title is what we tried to change it to
+ self.assertEqual(new_first_comment['title'], new_comment_title['title'])
+
+ # Check that the project modified ts has changed.
+ self.assertGreater(project_new_ts, project_old_ts)
+
+
+ def test_check_project_modified_is_correct(self):
+ """Checks that the project level modified is correct """
+
+ response = self.c.get('/api/v1/project/', **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ for project in objects:
+ most_recent_comment = self.retrieve_most_recent_timestamp(project['comments'])
+ self.assertEquals(most_recent_comment.strftime('%Y-%m-%dT%H:%M:%S'),
+ datetime.datetime.strptime(project['modified'], '%Y-%m-%dT%H:%M:%S.%f').strftime('%Y-%m-%dT%H:%M:%S'))
+
+ def test_check_meta_modified_is_correct(self):
+ """Checks that the meta-level modified is correct """
+
+ response = self.c.get('/api/v1/project/', **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ most_recent_project = self.retrieve_most_recent_timestamp(objects)
+ most_recent_comment = datetime.datetime.utcnow() - datetime.timedelta(days=1000)
+ for project in objects:
+ most_recent_comment = max([self.retrieve_most_recent_timestamp(project['comments']), most_recent_comment])
+ most_recent = max([most_recent_project, most_recent_comment])
+
+ self.assertEquals(most_recent, datetime.datetime.strptime(meta['modified'], '%Y-%m-%dT%H:%M:%S.%f'))
+
+ def test_update_project_tag_count(self):
+ """ Check that the tag count changes if its edited."""
+
+ # Get all data
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+ project_0 = content['objects'][0]
+ old_tags = project_0['tags']
+ old_tag_count = project_0['tag_count']
+ self.assertEquals(old_tag_count, 0)
+
+ # Patch some tags in, having forgotten them first time round
+ add_tags = {"tags" : ["physics","maths","geography","sports","english"]}
+ response = self.c.patch(self.fullURItoAbsoluteURI(project_0['resource_uri']), json.dumps(add_tags), content_type='application/json', **self.headers)
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+
+ # Retrieve the project
+ project_0 = content['objects'][0]
+ new_tags = project_0['tags']
+ new_tag_count = project_0['tag_count']
+
+ # Check it's not the same as the previous one and is the intended new one
+ self.assertNotEqual(old_tags, new_tags)
+ self.assertEqual(new_tag_count, 5)
+
+#@utils.override_settings(DEBUG=True)
+class Test_Data_Level_Responses(Test_Authentication_Base):
+
+ def setUp(self):
+ """ Add in some documents"""
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+ # Insert the test projects
+ for i in range(3):
+ doc = {"title": "Project #%s"%(i),"status":"published", "description": "First project description in here."}
+ # Create the project
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ main_project = response['location']
+
+ comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
+ # Try adding new comments
+ for j in range (5):
+ new_comment = {"user" : "rich@rich.com",
+ "body" : "#%s perhaps we could extend that project by..."%(j),
+ "title" : "and what about adding to that project with %s..."%(j),
+ "protective_marking" : {"classification":"unclassified","descriptor":""}}
+ # Post the comment
+ self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
+
+ backs_uri = self.fullURItoAbsoluteURI(main_project) + 'backs/'
+ new_backs = {"user" : "dave@dave.com",
+ "comment" : {"title":"this is one of the worst projects ever - someone else tried this and it failed."}}
+ response = self.c.post(backs_uri, json.dumps(new_backs), content_type='application/json', **self.headers)
+
+ # Check they went in OK
+ content = json.loads(self.c.get(self.resourceListURI('project'), **self.headers).content)
+
+ def test_response_data_test_set_list(self):
+ """Check that we get back the expected set of fields"""
+
+ response = self.c.get('/api/v1/project/?data_level=proj_test', **self.headers)
+ meta, data = self.get_meta_and_objects(response)
+ data_response_keys = data[0].keys()
+ for fld in settings.RESPONSE_FIELDS['proj_test']:
+ self.assertTrue(fld in data_response_keys)
+
+ def test_response_data_test_set_meta(self):
+ """Check that we get back the expected set of fields"""
+
+ response = self.c.get('/api/v1/project/?data_level=meta', **self.headers)
+ content = json.loads(response.content)
+ self.assertFalse(content.has_key('data'))
+ self.assertTrue(content.has_key('meta'))
+
+ def test_response_data_check_comments_modified(self):
+ """Is there a meta.modified for a /project//comments/ call?"""
+
+ response = self.c.get('/api/v1/project/?data_level=meta', **self.headers)
+ content = json.loads(response.content)
+ self.assertTrue(content.has_key('meta'))
+ self.assertTrue(content['meta']['modified'])
+
+#@utils.override_settings(DEBUG=True)
+class Test_Contributor_Naming(Test_Authentication_Base):
+
+ def setUp(self):
+ """ Add in some documents"""
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+ # We need another user for comments
+ user_id2, api_key2 = self.add_user(email='dave@davison.com', first_name='dave', last_name='davidson')
+ self.headers2 = self.build_headers(user_id2, api_key2)
+
+ # We also need a 3rd user because of the unique constraint (applied in code logic) on backs fields
+ user_id3, api_key3 = self.add_user(email='john@cleese.com', first_name='john', last_name='cleese')
+ self.headers3 = self.build_headers(user_id3, api_key3)
+
+
+ # Insert the test projects
+ for i in range(3):
+ doc = {"title": "Project #%s"%(i), "description": "First project description in here.", "status": "published"}
+ # Create the project
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ main_project = response['location']
+
+ comments_uri = self.fullURItoAbsoluteURI(main_project) + 'comments/'
+ # Try adding new comments
+ for j in range (5):
+ new_comment = {"body" : "#%s perhaps we could extend that project by..."%(j),
+ "title" : "and what about adding to that project with %s..."%(j),
+ "protective_marking" : {"classification":"unclassified","descriptor":""}}
+ # Post the comment
+ self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers2)
+
+ backs_uri = self.fullURItoAbsoluteURI(main_project) + 'backs/'
+ response = self.c.post(backs_uri, json.dumps({}), content_type='application/json', **self.headers3)
+
+ def test_project_contributor_name(self):
+ """ Check the project has a contribtor name """
+
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['contributor_name'], 'Bob Roberts')
+
+ def test_comment_contributor_name(self):
+ """ Check the comment has a contribtor name """
+
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['comments'][0]['contributor_name'], 'Dave Davidson')
+
+ def test_backs_contributor_name(self):
+ """ Check the backs has a contribtor name """
+
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['backs'][0]['contributor_name'], 'John Cleese')
+
+
+
+#@utils.override_settings(DEBUG=True)
+class Test_Project_With_Protective_Markings(Test_Authentication_Base):
+
+ def setUp(self):
+ """ Add in some documents"""
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+ def test_submit_full_pm(self):
+ """ Submit a complete protective marking """
+
+ doc = {"title": "Project #1",
+ "description": "First project description in here.",
+ "status" : "published",
+ "protective_marking" : {"classification" : "PUBLIC",
+ "classification_short" : "PU",
+ "classification_rank" : 0,
+
+ "national_caveats_primary_name" : 'ME ONLY',
+ "national_caveats_members" : [],
+
+ "codewords" : ['BANANA 1', 'BANANA 2'],
+ "codewords_short" : ['B1', 'B2'],
+
+ "descriptor" : 'PRIVATE'}
+ }
+
+ # Create the project
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['protective_marking']['classification'], 'PUBLIC')
+ self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'PUBLIC [PRIVATE] BANANA 1/BANANA 2 ME ONLY')
+
+ # Check that it also comes out in the project-level content
+ self.assertEquals(objects[0]['pretty_pm'], 'PUBLIC [PRIVATE] BANANA 1/BANANA 2 ME ONLY')
+
+
+ def test_submit_classification(self):
+ """ Submit a classification """
+
+ doc = {"title": "Project #1",
+ "description": "First project description in here.",
+ "status": "published",
+ "protective_marking" : {"classification" : "PUBLIC",
+ "classification_short" : "PU",
+ "classification_rank" : 0,
+ }
+ }
+
+ # Create the project
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['protective_marking']['classification'], 'PUBLIC')
+ self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'PUBLIC')
+
+
+ def test_submit_national_caveats(self):
+ """ Submit a national caveat """
+
+ doc = {"title": "Project #1",
+ "description": "First project description in here.",
+ "status":"published",
+ "protective_marking" : {"national_caveats_primary_name" : 'ME ONLY',
+ "national_caveats_members" : ['1', '2', '3'],
+ }
+ }
+
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['protective_marking']['national_caveats_primary_name'], 'ME ONLY')
+ self.assertEquals(objects[0]['protective_marking']['national_caveats_members'], ['1','2','3'])
+ self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'CLASSIFICATION NOT KNOWN ME ONLY')
+
+
+ def test_submit_codewords(self):
+ """ Submit a codeword """
+
+ doc = {"title": "Project #1",
+ "description": "First project description in here.",
+ "status":"published",
+ "protective_marking" : {"codewords" : ['BANANA 1', 'BANANA 2'],
+ "codewords_short" : ['B1', 'B2']}
+ }
+
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['protective_marking']['codewords'], ['BANANA 1', 'BANANA 2'])
+ self.assertEquals(objects[0]['protective_marking']['codewords_short'], ['B1', 'B2'])
+ self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'CLASSIFICATION NOT KNOWN BANANA 1/BANANA 2')
+
+ def test_submit_descriptors(self):
+ """ Submit descriptors """
+
+ doc = {"title": "Project #1",
+ "description": "First project description in here.",
+ "status":"published",
+ "protective_marking" : {"descriptor" : 'PRIVATE'}
+ }
+
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['protective_marking']['descriptor'], 'PRIVATE')
+ self.assertEquals(objects[0]['protective_marking']['pretty_pm'], 'CLASSIFICATION NOT KNOWN [PRIVATE]')
+
+
+#@utils.override_settings(DEBUG=True)
+class Test_Get_All_PMs(TestCase):
+
+ def setUp(self):
+
+ self.sample_pms = [{'classification':'public','classification_short':'PU'},
+ {'classification':'group','classification_short':'GR'},
+ {'classification':'private','classification_short':'PR'},
+ {'classification':'personal','classification_short':'PE'}]
+
+ # Build up a PM
+ self.pm = documents.ProtectiveMarking(classification='PUBLIC',
+ classification_short='PU',
+ classification_rank=0,
+ descriptor='BUSINESS',
+ codewords=['THIS','THAT'],
+ codewords_short=['T1','T2'],
+ national_caveats_primary_name='ME ONLY',
+ national_caveats_members=['1','2','3'],
+ national_caveats_rank=2)
+
+ #===============================================================================
+ #
+ # COMMENTED BECAUSE I CAN'T WORK OUT HOW TO BUILD A BUNDLE INSIDE A TEST
+ #
+ # def test_get_pm_from_top_level_only(self):
+ # """Retrieves pms from a set of objects."""
+ #
+ # docs = []
+ # for i in range(4):
+ #
+ # new_pm = copy.deepcopy(self.pm)
+ # new_pm['classification'] = self.sample_pms[i]['classification']
+ # new_pm['classification_short'] = self.sample_pms[i]['classification_short']
+ #
+ # doc = documents.Project(title ='new project', protective_marking = new_pm)
+ # #print doc.to_json()
+ #
+ # docs.append(doc)
+ #
+ # pm_list = api.get_all_pms(docs)
+ #
+ # self.assertEquals(len(pm_list), 4)
+ #
+ #
+ # def test_get_pm_from_top_level_and_nested(self):
+ # """Retrieves pms from a set of objects and sub objects."""
+ #
+ # docs = []
+ #
+ # for i in range(4):
+ #
+ # new_pm = copy.deepcopy(self.pm)
+ # new_pm['classification'] = self.sample_pms[i]['classification']
+ # new_pm['classification_short'] = self.sample_pms[i]['classification_short']
+ #
+ # # Loop and create some comments
+ # comments = [documents.Comment(title='new comment', body='great project', protective_marking=new_pm) for i in range(3)]
+ #
+ # # Create the document
+ # doc = documents.Project(title ='new project',
+ # comments=comments,
+ # protective_marking=new_pm)
+ # docs.append(doc)
+ #
+ # pm_list = api.get_all_pms(docs)
+ #
+ # self.assertEquals(len(pm_list), 16)
+ #
+ #===============================================================================
+
+ def test_get_max_pm_inject_O(self):
+ """Retrieves the max pm"""
+
+ pm_list = []
+ for i in range(3):
+ pm = copy.deepcopy(self.pm)
+ pm_list.append(pm)
+
+ max_pm = api.get_max_pm(pm_list)
+ self.assertEquals(max_pm['classification'], 'PUBLIC')
+
+
+ def test_get_max_pm_inject_S(self):
+ """Retrieves the max pm"""
+
+ pm_list = []
+ for i in range(3):
+ pm = copy.deepcopy(self.pm)
+ pm_list.append(pm)
+
+ pm_list[0]['classification']='PRIVATE'
+ pm_list[0]['classification_short']='PR'
+ pm_list[0]['classification_rank']=2
+
+ max_pm = api.get_max_pm(pm_list)
+ self.assertEquals(max_pm['classification'], 'PRIVATE')
+
+ def test_get_max_pm_inject_TS(self):
+ """Retrieves the max pm"""
+
+ pm_list = []
+ for i in range(3):
+ pm = copy.deepcopy(self.pm)
+ pm_list.append(pm)
+
+ pm_list[0]['classification']='PRIVATE'
+ pm_list[0]['classification_short']='PR'
+ pm_list[0]['classification_rank']=2
+
+ pm_list[1]['classification']='PERSONAL'
+ pm_list[1]['classification_short']='PE'
+ pm_list[1]['classification_rank']=3
+
+ max_pm = api.get_max_pm(pm_list)
+ self.assertEquals(max_pm['classification'], 'PERSONAL')
+
+ def test_get_max_pm_nat_cavs(self):
+ """Retrieves the max pm - check national cavs"""
+
+ pm_list = []
+ for i in range(3):
+ pm = copy.deepcopy(self.pm)
+ pm_list.append(pm)
+
+ pm_list[0]['national_caveats_primary_name']='HIM ONLY'
+ pm_list[0]['national_caveats_members']= ['1','2']
+ pm_list[0]['national_caveats_rank']=3
+
+ max_pm = api.get_max_pm(pm_list)
+ self.assertEquals(max_pm['national_caveats_primary_name'], 'HIM ONLY')
+ self.assertEquals(max_pm['national_caveats_members'], ['1','2'])
+ self.assertEquals(max_pm['national_caveats_rank'], 3)
+
+ def test_get_max_pm_multiple_descriptors(self):
+ """Retrieves the max pm"""
+
+ descriptors=['LOCSEN','PRIVATE','PERSONAL']
+
+ pm_list = []
+ for i in range(3):
+ pm = copy.deepcopy(self.pm)
+ pm['descriptor']=descriptors[i]
+ pm_list.append(pm)
+
+ max_pm = api.get_max_pm(pm_list)
+ self.assertEquals(max_pm['descriptor'], 'LOCSEN,PRIVATE,PERSONAL')
+
+ def test_get_max_pm_multiple_codewords(self):
+ """Retrieves the max pm"""
+
+ codewords=['BANANA1','BANANA2','BANANA3']
+
+ pm_list = []
+ for i in range(3):
+ pm = copy.deepcopy(self.pm)
+ pm['codewords']=[codewords[i]]
+ pm_list.append(pm)
+
+ max_pm = api.get_max_pm(pm_list)
+ self.assertEquals(sorted(max_pm['codewords']), sorted(codewords))
+
+#@utils.override_settings(DEBUG=True)
+class Test_Max_PM_in_Meta(Test_Authentication_Base):
+
+ def setUp(self):
+ """ Add in some documents"""
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+ # We need another user for comments
+ user_id2, api_key2 = self.add_user(email='dave@davison.com', first_name='dave', last_name='davidson')
+ self.headers2 = self.build_headers(user_id2, api_key2)
+
+ # We also need a 3rd user because of the unique constraint (applied in code logic) on backs fields
+ user_id3, api_key3 = self.add_user(email='john@cleese.com', first_name='john', last_name='cleese')
+ self.headers3 = self.build_headers(user_id3, api_key3)
+
+ self.pm = {'classification':'PUBLIC',
+ 'classification_short':'O',
+ 'classification_rank':0,
+ 'descriptor':'PRIVATE',
+ 'codewords':['THIS','THAT'],
+ 'codewords_short':['T1','T2'],
+ 'national_caveats_primary_name':'ME ONLY',
+ 'national_caveats_members':['1','2','3'],
+ 'national_caveats_rank':2
+ }
+
+
+ def test_just_project_level_O(self):
+ """ just pms in the projects - all at O to overcome a bug where PUBLIC wasn't rendering a max pm."""
+
+ doc = {"title": "Project #1",
+ "description": "First project description in here.",
+ "status":"published",
+ 'protective_marking' : self.pm}
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(meta['max_pm']['classification'], 'PUBLIC')
+
+
+ def test_just_project_level(self):
+ """ just pms in the projects """
+
+ # Insert a couple of documents
+ doc = {"title": "Project #1", "description": "First project description in here.", 'protective_marking' : self.pm, "status":"published"}
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ doc = {"title": "Project #2", "description": "First project description in here.", 'protective_marking' : self.pm, "status":"published"}
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+
+ # Bump the classification on the final one
+ doc['protective_marking']['classification'] = 'PRIVATE'
+ doc['protective_marking']['classification_short'] = 'PR'
+ doc['protective_marking']['classification_rank'] = 2
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(meta['max_pm']['classification'], 'PRIVATE')
+
+ def test_include_embedded_level(self):
+ """ PMs inside the embedded level too """
+
+ # Insert a couple of documents
+ doc = {"title": "Project #1", "description": "First project description in here.", 'protective_marking' : self.pm, "status": "published"}
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+ doc = {"title": "Project #2", "description": "First project description in here.", 'protective_marking' : self.pm, "status": "published"}
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+
+ # Bump the classification on the final one
+ doc['protective_marking']['classification'] = 'PRIVATE'
+ doc['protective_marking']['classification_short'] = 'PR'
+ doc['protective_marking']['classification_rank'] = 2
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.assertEquals(response.status_code, 201)
+
+ response = self.c.get(self.resourceListURI('project'))
+ meta, objects = self.get_meta_and_objects(response)
+ comments_uri = objects[0]['resource_uri'] + 'comments/'
+
+ pm = copy.deepcopy(self.pm)
+ pm['classification'] = 'PERSONAL'
+ pm['classification_short'] = 'PE'
+ pm['classification_rank'] = 3
+
+ new_comment = {"body" : "perhaps we could extend that project by...",
+ "title" : "and what about adding to that project with...",
+ "protective_marking" : pm}
+
+ # Post the comment
+ response = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers2)
+ check = response['location']
+ response = self.c.get(check)
+
+ response = self.c.get(self.resourceListURI('project'))
+
+ meta, objects = self.get_meta_and_objects(response)
+ #print json.dumps(objects, indent=3)
+
+ self.assertEquals(meta['max_pm']['classification'], 'PERSONAL')
+
+
+#----------------------------------------------------------------------------------------
+
+#@utils.override_settings(DEBUG=True)
+class Test_Deletes(Test_Authentication_Base):
+
+ def setUp(self):
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+
+ user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
+ self.headers2 = self.build_headers(user_id2, api_key2)
+
+ self.pm = {"classification" : "PUBLIC",
+ "classification_short" : "PU",
+ "classification_rank" : 0,
+ "national_caveats_primary_name" : "MY EYES ONLY",
+ "descriptor" : "private",
+ "codewords" : ["banana1","banana2"],
+ "codewords_short" : ["b1","b2"],
+ "national_caveats_members" : ["ME"],
+ "national_caveats_rank" : 3}
+
+ """ Insert documents to start with"""
+ docs = [{"title": "The first project.",
+ "description": "First project description in here.",
+ "tags" : ["physics","maths","geography","sports","english"],
+ "protective_marking":self.pm,
+ "status" : "published"},
+
+ {"title": "The second project.",
+ "description": "second project description in here.",
+ "tags" : ["physics","maths","geography","sports"],
+ "protective_marking":self.pm,
+ "status" : "published"}]
+
+ # Store the responses
+ self.doc_locations = []
+ x = 0
+ for doc in docs:
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.doc_locations.append(response['location'])
+ self.assertEqual(response.status_code, 201)
+
+ project_url = response['location']
+ comments_uri = project_url + 'comments/'
+ new_comment = {"body" : "perhaps we could extend that project by...",
+ "title" : "and what about adding to that project with...",
+ "protective_marking" : self.pm}
+ for i in range(x):
+ comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
+
+ x += 1
+
+ time.sleep(1)
+
+ response = self.c.get(self.resourceListURI('project')+'?data_level=less', **self.headers)
+ #for x in json.loads(response.content)['objects']:
+ # print json.dumps(x, indent=4)
+
+ def test_delete_comment_decrement_count(self):
+ """ Delete a comment from an project and check the comment_count reduces """
+
+ # Get the id for the project
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ project1_id = objects[1]['id']
+
+ # Count the comments
+ resp = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
+ meta, objects = self.get_meta_and_objects(resp)
+ self.assertEquals(objects[0]['comment_count'], 1)
+
+ # Delete the comment
+ path = self.resourceListURI('project')+'%s/comments/0/'%(project1_id)
+ resp = self.c.delete(path, content_type='application/json', **self.headers)
+
+ response = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['comment_count'], 0)
+
+ def test_delete_when_multiple_comments(self):
+ """ Create a comment - done in setup
+ Create another comment - done here
+ Attempt to delete a comment by specific URI
+ """
+
+ # Get the project id
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ project1_id = objects[1]['id']
+
+ # Build the comments URI
+ comments_uri = self.resourceDetailURI('project', project1_id)+'comments/'
+
+ # Post a new comment
+ new_comment = {"body" : "perhaps we could extend that project by...",
+ "title" : "and what about adding to that project with...",
+ "protective_marking" : self.pm}
+
+ comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
+
+ # Check there are now 2 comments
+ response = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['comment_count'], 2)
+ self.assertEquals(len(objects[0]['comments']), 2)
+
+ # Delete the first comment
+ delete_uri = comments_uri + '0/'
+ resp = self.c.delete(delete_uri, content_type='application/json', **self.headers)
+ self.assertEquals(resp.status_code, 204)
+
+ # Now check that it definitely got deleted
+ response = self.c.get(self.resourceDetailURI('project', project1_id), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+ self.assertEquals(objects[0]['comment_count'], 1)
+ self.assertEquals(len(objects[0]['comments']), 1)
+
+
+#@utils.override_settings(DEBUG=True)
+class Test_Get_Non_Standard_Fields(Test_Authentication_Base):
+
+ def setUp(self):
+
+ # Add a user and gain access to the API key and user
+ self.user_id, self.api_key = self.add_user("staff_user1@projects.com")
+ user = self.give_privileges(self.user_id, priv='staff')
+ self.headers = self.build_headers(self.user_id, self.api_key)
+
+
+ user_id2, api_key2 = self.add_user(email='dave@dave.com', first_name='dave', last_name='david')
+ self.headers2 = self.build_headers(user_id2, api_key2)
+
+ self.pm = {"classification" : "PUBLIC",
+ "classification_short" : "PU",
+ "classification_rank" : 0,
+ "national_caveats_primary_name" : "MY EYES ONLY",
+ "descriptor" : "private",
+ "codewords" : ["banana1","banana2"],
+ "codewords_short" : ["b1","b2"],
+ "national_caveats_members" : ["ME"],
+ "national_caveats_rank" : 3}
+
+ """ Insert documents to start with"""
+ docs = [{"title": "The first project.",
+ "description": 'First project description in here.',
+ "tags" : ["physics","maths","geography","sports","english"],
+ "protective_marking":self.pm,
+ "status" : "published"},
+
+ {"title": "The second project.",
+ "description": "second
project description in here." + " The quick brown fox jumped over the lazy dog."*10,
+ "tags" : ["physics","maths","geography","sports"],
+ "protective_marking":self.pm,
+ "status" : "published"}
+ ]
+
+ # Store the responses
+ self.doc_locations = []
+ x = 0
+ for doc in docs:
+ response = self.c.post(self.resourceListURI('project'), json.dumps(doc), content_type='application/json', **self.headers)
+ self.doc_locations.append(response['location'])
+ self.assertEqual(response.status_code, 201)
+
+ project_url = response['location']
+
+ comments_uri = project_url + 'comments/'
+ new_comment = {"body" : "perhaps we could extend that project by...",
+ "title" : "and what about adding to that project with...",
+ "protective_marking" : self.pm}
+ for i in range(x):
+ comment_resp = self.c.post(comments_uri, json.dumps(new_comment), content_type='application/json', **self.headers)
+
+ x += 1
+
+ time.sleep(1)
+
+ def test_check_description_snippet(self):
+ """ Checks we get back a snippet of the description from html """
+
+ # Retrieve all results
+ response = self.c.get(self.resourceListURI('project'), **self.headers)
+ meta, objects = self.get_meta_and_objects(response)
+
+ #First doc - short
+ self.assertEquals(objects[0]['description_snippet'], 'First project description in here.')
+ self.assertEquals(objects[1]['description_snippet'], 'second project description in here. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox jumped over the lazy dog. The quick brown fox...')
+
+
+class Test_Basic_API_Functions(TestCase):
+
+ def test_strip_tags(self):
+ """ Tests stripping html tags"""
+
+ text = """this is the text"""
+ text = api_functions.strip_tags(text)
+ self.assertEquals(text, "this is the text")
+
+ def test_smart_truncate_short_string(self):
+ """ Tests truncating on full words"""
+
+ text = "the quick brown fox jumped over the lazy dog."
+ text = api_functions.smart_truncate(content=text, length=180, suffix='...')
+ self.assertEquals(text, 'the quick brown fox jumped over the lazy dog.')
+
+ def test_smart_truncate(self):
+ """ Tests truncating on full words"""
+
+ text = "the quick brown fox jumped over the lazy dog."
+ text = api_functions.smart_truncate(content=text, length=18, suffix='...')
+ self.assertEquals(text, 'the quick brown...')
+
+
+ def test_derive_snippet(self):
+ """ Tests complete snippet derivation """
+
+ text = """the quick brown fox jumped over the lazy dog."""
+ text = api_functions.derive_snippet(text_html=text, chrs=18)
+ self.assertEquals(text, 'the quick brown...')
+
+ def test_merge_tag_results(self):
+ """ Check that 2 lists of dicts get merged correctly - exact match """
+
+ project_tags = [{"_id":"hello", "count":1},
+ {"_id":"world", "count":2},
+ {"_id":"again", "count":3}]
+
+ proj_tags = [{"_id":"hello", "count":1},
+ {"_id":"world", "count":2},
+ {"_id":"again", "count":3}]
+
+ truth = [{"_id":"hello", "count":2},
+ {"_id":"world", "count":4},
+ {"_id":"again", "count":6}]
+
+ res = api_functions.merge_tag_results(proj_tags, project_tags)
+
+ truth_dict = {}
+ res_dict = {}
+
+ # This works for 2.6 and 2.7, which the previous version of the code didn't (only 2.7).
+ for tag in truth:
+ truth_dict[tag['_id']] = tag['count']
+
+ for tag in res:
+ res_dict[tag['_id']] = tag['count']
+
+ for key in truth_dict.keys():
+ self.assertEquals(truth_dict[key], res_dict[key])
+
+ def test_merge_tag_results_gaps(self):
+ """ Check that 2 lists of dicts get merged correctly - gaps in 1 """
+
+ project_tags = [{"_id":"hello", "count":1},
+ {"_id":"world", "count":2},
+ {"_id":"again", "count":3}]
+
+ proj_tags = [{"_id":"hello", "count":1},
+ {"_id":"again", "count":3}]
+
+ truth = [{"_id":"again", "count":6},
+ {"_id":"hello", "count":2},
+ {"_id":"world", "count":2}]
+
+ res = api_functions.merge_tag_results(proj_tags, project_tags)
+ self.assertEquals(truth, res)
+
+ def test_cleanup_tags(self):
+ """ Cleanup the tags submitted from the front-end to avoid XSS risks """
+
+ tags = ['puppies',
+ '',
+ """""",
+ "",
+ "",
+ """`""",
+ 'puppies',
+ 'puppies',
+ 'puppies',
+ 'kittens']
+
+ clean_tags = api_functions.cleanup_tags(tags)
+ self.assertEquals(clean_tags, [u'puppies', u'', u'', u'', u'', u'`', u'puppies', u'puppies', u'puppies', u'kittens'])
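+
+
+# A minimal reference sketch (added for illustration; hypothetical, not the
+# actual api_functions implementation): merging sums the 'count' values per
+# '_id', and ids present in only one list pass through unchanged.
+def _merge_tag_results_sketch(tags_a, tags_b):
+    counts = {}
+    for tag in tags_a + tags_b:
+        counts[tag['_id']] = counts.get(tag['_id'], 0) + tag['count']
+    return [{'_id': tag_id, 'count': count}
+            for tag_id, count in sorted(counts.items())]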
+
+
+# -*- coding: utf-8 -*-
+# Copyright 2009 Jason Stitt
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import unittest
+from tidylib import tidy_document
+
+DOC = '''
+
+
+
+
+
+ %s
+
+
+'''
+
+class TestDocs1(unittest.TestCase):
+ """ Test some sample documents """
+
+ def test_doc_with_unclosed_tag(self):
+ h = "hello"
+ expected = DOC % '''
+ hello
+
'''
+ doc, err = tidy_document(h)
+ self.assertEqual(doc, expected)
+
+ def test_doc_with_incomplete_img_tag(self):
+ h = ""
+ expected = DOC % ''''''
+ doc, err = tidy_document(h)
+ self.assertEqual(doc, expected)
+
+ def test_doc_with_entity(self):
+ h = "é"
+ expected = DOC % "é"
+ doc, err = tidy_document(h)
+ self.assertEqual(doc, expected)
+
+ expected = DOC % "é"
+ doc, err = tidy_document(h, {'numeric-entities':1})
+ self.assertEqual(doc, expected)
+
+ def test_doc_with_unicode(self):
+ h = u"unicode string ß"
+ expected = unicode(DOC, 'utf-8') % h
+ doc, err = tidy_document(h)
+ self.assertEqual(doc, expected)
+
+ def test_doc_with_unicode_subclass(self):
+ class MyUnicode(unicode):
+ pass
+
+ h = MyUnicode(u"unicode string ß")
+ expected = unicode(DOC, 'utf-8') % h
+ doc, err = tidy_document(h)
+ self.assertEqual(doc, expected)
+
+
+if __name__ == '__main__':
+ unittest.main()
+# -*- coding: utf-8 -*-
+"""
+markupsafe
+~~~~~~~~~~
+
+Implements an escape function and a Markup string to replace HTML
+special characters with safe representations.
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+import re
+import string
+
+from ._compat import int_types
+from ._compat import iteritems
+from ._compat import Mapping
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import unichr
+
+__version__ = "1.1.1"
+
+__all__ = ["Markup", "soft_unicode", "escape", "escape_silent"]
+
+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
+_entity_re = re.compile(r"&([^&;]+);")
+
+
+class Markup(text_type):
+ """A string that is ready to be safely inserted into an HTML or XML
+ document, either because it was escaped or because it was marked
+ safe.
+
+ Passing an object to the constructor converts it to text and wraps
+ it to mark it safe without escaping. To escape the text, use the
+ :meth:`escape` class method instead.
+
+ >>> Markup('Hello, <em>World</em>!')
+ Markup('Hello, <em>World</em>!')
+ >>> Markup(42)
+ Markup('42')
+ >>> Markup.escape('Hello, <em>World</em>!')
+ Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')
+
+ This implements the ``__html__()`` interface that some frameworks
+ use. Passing an object that implements ``__html__()`` will wrap the
+ output of that method, marking it safe.
+
+ >>> class Foo:
+ ... def __html__(self):
+ ... return '<a href="/foo">foo</a>'
+ ...
+ >>> Markup(Foo())
+ Markup('<a href="/foo">foo</a>')
+
+ This is a subclass of the text type (``str`` in Python 3,
+ ``unicode`` in Python 2). It has the same methods as that type, but
+ all methods escape their arguments and return a ``Markup`` instance.
+
+ >>> Markup('<em>%s</em>') % 'foo & bar'
+ Markup('<em>foo &amp; bar</em>')
+ >>> Markup('<em>Hello</em> ') + '<foo>'
+ Markup('<em>Hello</em> &lt;foo&gt;')
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, base=u"", encoding=None, errors="strict"):
+ if hasattr(base, "__html__"):
+ base = base.__html__()
+ if encoding is None:
+ return text_type.__new__(cls, base)
+ return text_type.__new__(cls, base, encoding, errors)
+
+ def __html__(self):
+ return self
+
+ def __add__(self, other):
+ if isinstance(other, string_types) or hasattr(other, "__html__"):
+ return self.__class__(super(Markup, self).__add__(self.escape(other)))
+ return NotImplemented
+
+ def __radd__(self, other):
+ if hasattr(other, "__html__") or isinstance(other, string_types):
+ return self.escape(other).__add__(self)
+ return NotImplemented
+
+ def __mul__(self, num):
+ if isinstance(num, int_types):
+ return self.__class__(text_type.__mul__(self, num))
+ return NotImplemented
+
+ __rmul__ = __mul__
+
+ def __mod__(self, arg):
+ if isinstance(arg, tuple):
+ arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
+ else:
+ arg = _MarkupEscapeHelper(arg, self.escape)
+ return self.__class__(text_type.__mod__(self, arg))
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, text_type.__repr__(self))
+
+ def join(self, seq):
+ return self.__class__(text_type.join(self, map(self.escape, seq)))
+
+ join.__doc__ = text_type.join.__doc__
+
+ def split(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
+
+ split.__doc__ = text_type.split.__doc__
+
+ def rsplit(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
+
+ rsplit.__doc__ = text_type.rsplit.__doc__
+
+ def splitlines(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
+
+ splitlines.__doc__ = text_type.splitlines.__doc__
+
+ def unescape(self):
+ """Convert escaped markup back into a text string. This replaces
+ HTML entities with the characters they represent.
+
+ >>> Markup('Main &raquo; <em>About</em>').unescape()
+ 'Main » <em>About</em>'
+ """
+ from ._constants import HTML_ENTITIES
+
+ def handle_match(m):
+ name = m.group(1)
+ if name in HTML_ENTITIES:
+ return unichr(HTML_ENTITIES[name])
+ try:
+ if name[:2] in ("#x", "#X"):
+ return unichr(int(name[2:], 16))
+ elif name.startswith("#"):
+ return unichr(int(name[1:]))
+ except ValueError:
+ pass
+ # Don't modify unexpected input.
+ return m.group()
+
+ return _entity_re.sub(handle_match, text_type(self))
+
+ def striptags(self):
+ """:meth:`unescape` the markup, remove tags, and normalize
+ whitespace to single spaces.
+
+ >>> Markup('Main &raquo;\t<em>About</em>').striptags()
+ 'Main » About'
+ """
+ stripped = u" ".join(_striptags_re.sub("", self).split())
+ return Markup(stripped).unescape()
+
+ @classmethod
+ def escape(cls, s):
+ """Escape a string. Calls :func:`escape` and ensures that for
+ subclasses the correct type is returned.
+ """
+ rv = escape(s)
+ if rv.__class__ is not cls:
+ return cls(rv)
+ return rv
+
+ def make_simple_escaping_wrapper(name): # noqa: B902
+ orig = getattr(text_type, name)
+
+ def func(self, *args, **kwargs):
+ args = _escape_argspec(list(args), enumerate(args), self.escape)
+ _escape_argspec(kwargs, iteritems(kwargs), self.escape)
+ return self.__class__(orig(self, *args, **kwargs))
+
+ func.__name__ = orig.__name__
+ func.__doc__ = orig.__doc__
+ return func
+
+ for method in (
+ "__getitem__",
+ "capitalize",
+ "title",
+ "lower",
+ "upper",
+ "replace",
+ "ljust",
+ "rjust",
+ "lstrip",
+ "rstrip",
+ "center",
+ "strip",
+ "translate",
+ "expandtabs",
+ "swapcase",
+ "zfill",
+ ):
+ locals()[method] = make_simple_escaping_wrapper(method)
+
+ def partition(self, sep):
+ return tuple(map(self.__class__, text_type.partition(self, self.escape(sep))))
+
+ def rpartition(self, sep):
+ return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep))))
+
+ def format(self, *args, **kwargs):
+ formatter = EscapeFormatter(self.escape)
+ kwargs = _MagicFormatMapping(args, kwargs)
+ return self.__class__(formatter.vformat(self, args, kwargs))
+
+ def __html_format__(self, format_spec):
+ if format_spec:
+ raise ValueError("Unsupported format specification " "for Markup.")
+ return self
+
+ # not in python 3
+ if hasattr(text_type, "__getslice__"):
+ __getslice__ = make_simple_escaping_wrapper("__getslice__")
+
+ del method, make_simple_escaping_wrapper
+
+
+class _MagicFormatMapping(Mapping):
+ """This class implements a dummy wrapper to fix a bug in the Python
+ standard library for string formatting.
+
+ See http://bugs.python.org/issue13598 for information about why
+ this is necessary.
+ """
+
+ def __init__(self, args, kwargs):
+ self._args = args
+ self._kwargs = kwargs
+ self._last_index = 0
+
+ def __getitem__(self, key):
+ if key == "":
+ idx = self._last_index
+ self._last_index += 1
+ try:
+ return self._args[idx]
+ except LookupError:
+ pass
+ key = str(idx)
+ return self._kwargs[key]
+
+ def __iter__(self):
+ return iter(self._kwargs)
+
+ def __len__(self):
+ return len(self._kwargs)
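+
+ # Worked example (added): for each auto-numbered '{}' field, vformat()
+ # looks up the empty key '', and this wrapper serves the positional args
+ # in order, so '{} {}' receives args[0] and then args[1] (issue 13598).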
+
+
+if hasattr(text_type, "format"):
+
+ class EscapeFormatter(string.Formatter):
+ def __init__(self, escape):
+ self.escape = escape
+
+ def format_field(self, value, format_spec):
+ if hasattr(value, "__html_format__"):
+ rv = value.__html_format__(format_spec)
+ elif hasattr(value, "__html__"):
+ if format_spec:
+ raise ValueError(
+ "Format specifier {0} given, but {1} does not"
+ " define __html_format__. A class that defines"
+ " __html__ must define __html_format__ to work"
+ " with format specifiers.".format(format_spec, type(value))
+ )
+ rv = value.__html__()
+ else:
+ # We need to make sure the format spec is unicode here as
+ # otherwise the wrong callback methods are invoked. For
+ # instance a byte string there would invoke __str__ and
+ # not __unicode__.
+ rv = string.Formatter.format_field(self, value, text_type(format_spec))
+ return text_type(self.escape(rv))
+
+
+def _escape_argspec(obj, iterable, escape):
+ """Helper for various string-wrapped functions."""
+ for key, value in iterable:
+ if hasattr(value, "__html__") or isinstance(value, string_types):
+ obj[key] = escape(value)
+ return obj
+
+
+class _MarkupEscapeHelper(object):
+ """Helper for Markup.__mod__"""
+
+ def __init__(self, obj, escape):
+ self.obj = obj
+ self.escape = escape
+
+ def __getitem__(self, item):
+ return _MarkupEscapeHelper(self.obj[item], self.escape)
+
+ def __str__(self):
+ return text_type(self.escape(self.obj))
+
+ __unicode__ = __str__
+
+ def __repr__(self):
+ return str(self.escape(repr(self.obj)))
+
+ def __int__(self):
+ return int(self.obj)
+
+ def __float__(self):
+ return float(self.obj)
+
+
+# We have to import it down here as the speedups and native
+# modules import the markup type which is defined above.
+try:
+ from ._speedups import escape, escape_silent, soft_unicode
+except ImportError:
+ from ._native import escape, escape_silent, soft_unicode
+
+if not PY2:
+ soft_str = soft_unicode
+ __all__.append("soft_str")
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""ml2 portbinding
+
+Revision ID: 32a65f71af51
+Revises: 14f24494ca31
+Create Date: 2013-09-03 08:40:22.706651
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '32a65f71af51'
+down_revision = '14f24494ca31'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+ 'neutron.plugins.ml2.plugin.Ml2Plugin'
+]
+
+from alembic import op
+import sqlalchemy as sa
+
+from neutron.db import migration
+
+
+def upgrade(active_plugins=None, options=None):
+ if not migration.should_run(active_plugins, migration_for_plugins):
+ return
+
+ op.create_table(
+ 'ml2_port_bindings',
+ sa.Column('port_id', sa.String(length=36), nullable=False),
+ sa.Column('host', sa.String(length=255), nullable=False),
+ sa.Column('vif_type', sa.String(length=64), nullable=False),
+ sa.Column('cap_port_filter', sa.Boolean(), nullable=False),
+ sa.Column('driver', sa.String(length=64), nullable=True),
+ sa.Column('segment', sa.String(length=36), nullable=True),
+ sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
+ ondelete='CASCADE'),
+ sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
+ ondelete='SET NULL'),
+ sa.PrimaryKeyConstraint('port_id')
+ )
+
+ # Note that 176a85fc7d79_add_portbindings_db.py was never enabled
+ # for ml2, so there is no need to drop the portbindingports table
+ # that is no longer used.
+
+
+def downgrade(active_plugins=None, options=None):
+ if not migration.should_run(active_plugins, migration_for_plugins):
+ return
+
+ op.drop_table('ml2_port_bindings')
+
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# OpenERP, Open Source Management Solution
+# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+{
+ 'name': 'Mass Mailing Campaigns',
+ 'summary': 'Design, send and track emails',
+ 'description': """
+Easily send mass mailing to your leads, opportunities or customers. Track
+marketing campaigns performance to improve conversion rates. Design
+professional emails and reuse templates in a few clicks.
+ """,
+ 'version': '2.0',
+ 'author': 'OpenERP',
+ 'website': 'https://www.odoo.com/page/mailing',
+ 'category': 'Marketing',
+ 'depends': [
+ 'mail',
+ 'email_template',
+ 'marketing',
+ 'web_kanban_gauge',
+ 'web_kanban_sparkline',
+ 'website_mail',
+ ],
+ 'data': [
+ 'data/mail_data.xml',
+ 'data/mass_mailing_data.xml',
+ 'wizard/mail_compose_message_view.xml',
+ 'wizard/test_mailing.xml',
+ 'views/mass_mailing_report.xml',
+ 'views/mass_mailing.xml',
+ 'views/res_config.xml',
+ 'views/res_partner.xml',
+ 'views/email_template.xml',
+ 'views/website_mass_mailing.xml',
+ 'views/snippets.xml',
+ 'security/ir.model.access.csv',
+ ],
+ 'qweb': [],
+ 'demo': [
+ 'data/mass_mailing_demo.xml',
+ ],
+ 'installable': True,
+ 'auto_install': False,
+}
+
+#! /usr/bin/env python3
+import sys
+from os.path import abspath, expanduser, dirname, join
+from itertools import chain
+import json
+import argparse
+
+from vis import vis, unvis, VIS_WHITE
+
+
+__dir__ = dirname(abspath(__file__))
+
+OUTPUT_FILE = join(__dir__, '..', 'fixtures', 'unvis_fixtures.json')
+
+# Add custom fixtures here
+CUSTOM_FIXTURES = [
+ # test long multibyte string
+ ''.join(chr(cp) for cp in range(1024)),
+ 'foo bar',
+ 'foo\nbar',
+ "$bar = 'baz';",
+ r'$foo = "\x20\\x20\\\x20\\\\x20"',
+ '$foo = function($bar) use($baz) {\n\treturn $baz->getFoo()\n};'
+]
+
+RANGES = {
+ # All valid codepoints in the BMP
+ 'bmp': chain(range(0x0000, 0xD800), range(0xE000, 0xFFFF)),
+ # Smaller set of pertinent? codepoints inside BMP
+ # see: http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+ 'small': chain(
+ # latin blocks
+ range(0x0000, 0x0250),
+ # Greek, Cyrillic
+ range(0x0370, 0x0530),
+ # Hebrew, Arabic
+ range(0x590, 0x0700),
+ # CJK radicals
+ range(0x2E80, 0x2F00),
+ # Hiragana, Katakana
+ range(0x3040, 0x3100)
+ )
+}
+
+
+if __name__ == '__main__':
+
+ argp = argparse.ArgumentParser(
+ description='Generates test data for Psy\\Test\\Util\\StrTest')
+ argp.add_argument('-f', '--format-output', action='store_true',
+ help='Indent JSON output to ease debugging')
+ argp.add_argument('-a', '--all', action='store_true',
+ help="""Generates test data for all codepoints of the BMP.
+ (same as --range=bmp). WARNING: You will need quite
+ a lot of RAM to run the testsuite !
+ """)
+ argp.add_argument('-r', '--range',
+ help="""Choose the range of codepoints used to generate
+ test data.""",
+ choices=list(RANGES.keys()),
+ default='small')
+ argp.add_argument('-o', '--output-file',
+ help="""Write test data to OUTPUT_FILE
+ (defaults to PSYSH_DIR/test/fixtures)""")
+ args = argp.parse_args()
+
+ cp_range = RANGES['bmp'] if args.all else RANGES[args.range]
+ indent = 2 if args.format_output else None
+ if args.output_file:
+ OUTPUT_FILE = abspath(expanduser(args.output_file))
+
+ fixtures = []
+
+ # use SMALL_RANGE by default, it should be enough.
+ # use BMP_RANGE for a more complete smoke test
+ for codepoint in cp_range:
+ char = chr(codepoint)
+ encoded = vis(char, VIS_WHITE)
+ decoded = unvis(encoded)
+ fixtures.append((encoded, decoded))
+
+ # Add our own custom fixtures at the end,
+ # since they would fail anyway if one of the previous did.
+ for fixture in CUSTOM_FIXTURES:
+ encoded = vis(fixture, VIS_WHITE)
+ decoded = unvis(encoded)
+ fixtures.append((encoded, decoded))
+
+ with open(OUTPUT_FILE, 'w') as fp:
+ # dump as json to avoid the backslashing and quoting nightmare
+ # between php and python
+ json.dump(fixtures, fp, indent=indent)
+
+ sys.exit(0)
+
+#!/usr/bin/env python2
+# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
+
+__license__ = 'GPL v3'
+__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
+__docformat__ = 'restructuredtext en'
+
+import os, textwrap, re, subprocess
+
+INC = '/usr/include/ImageMagick-6'
+
+'''
+Various constants defined in the ImageMagick header files. Note that
+they are defined as actual numeric constants rather than symbolic names to
+ensure that the extension can be compiled against older versions of ImageMagick
+than the one this script is run against.
+'''
+
+def parse_enums(f):
+ print '\nParsing:', f
+ raw = open(os.path.join(INC, f)).read()
+ raw = re.sub(r'(?s)/\*.*?\*/', '', raw)
+ raw = re.sub('#.*', '', raw)
+
+ for enum in re.findall(r'typedef\s+enum\s+\{([^}]+)', raw):
+ enum = re.sub(r'(?s)/\*.*?\*/', '', enum)
+ for x in enum.splitlines():
+ e = x.split(',')[0].strip().split(' ')[0]
+ if e:
+ val = get_value(e)
+ print e, val
+ yield e, val
+
+def get_value(const):
+ t = '''
+ #define MAGICKCORE_QUANTUM_DEPTH 16
+ #define MAGICKCORE_HDRI_ENABLE 0
+ #include <wand/MagickWand.h>
+ #include <stdio.h>
+ int main(int argc, char **argv) {
+ printf("%%d", %s);
+ return 0;
+ }
+ '''%const
+ with open('/tmp/ig.c','wb') as f:
+ f.write(t)
+ subprocess.check_call(['gcc', '-I'+INC, '/tmp/ig.c', '-o', '/tmp/ig', '-lMagickWand-6.Q16'])
+ return int(subprocess.Popen(["/tmp/ig"],
+ stdout=subprocess.PIPE).communicate()[0].strip())
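+
+# Usage sketch (added): get_value('UndefinedColorspace') writes a tiny C
+# program that prints the named enum constant, compiles it against the
+# installed ImageMagick headers, and returns the printed value as an int;
+# compiling locally keeps the numbers correct for whatever version is present.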
+
+
+def main():
+ constants = []
+ for x in ('resample', 'image', 'draw', 'distort', 'composite', 'geometry',
+ 'colorspace', 'compare', 'compress'):
+ constants += list(parse_enums('magick/%s.h'%x))
+ base = os.path.dirname(__file__)
+ constants = [
+ 'PyModule_AddIntConstant(m, "{0}", {1});'.format(c, v) for c, v in
+ constants]
+ raw = textwrap.dedent('''\
+ // Generated by generate.py
+
+ static void magick_add_module_constants(PyObject *m) {
+ %s
+ }
+ ''')%'\n '.join(constants)
+ with open(os.path.join(base, 'magick_constants.h'), 'wb') as f:
+ f.write(raw)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- coding: utf-8 -*-
+# Generated by Django 1.10.2 on 2016-10-24 12:15
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+import swapper
+
+from ..conf import ALTERNATIVE_NAME_TYPES
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('cities', '0005_add_foreignkeys_to_postalcode'),
+ swapper.dependency('cities', 'City'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='alternativename',
+ name='is_historic',
+ field=models.BooleanField(default=False),
+ ),
+ migrations.AddField(
+ model_name='alternativename',
+ name='kind',
+ field=models.CharField(choices=ALTERNATIVE_NAME_TYPES, default='name', max_length=4),
+ ),
+ migrations.AlterField(
+ model_name='alternativename',
+ name='name',
+ field=models.CharField(max_length=255),
+ ),
+ migrations.AlterField(
+ model_name='postalcode',
+ name='city',
+ field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='postal_codes', to=swapper.get_model_name('cities', 'City')),
+ ),
+ ]
+
+#! /usr/bin/env python
+
+# This file is part of the dvbobjects library.
+#
+# Copyright © 2004-2013 Lorenzo Pallara l.pallara@avalpa.com
+# Copyright © 2010 Andreas Regel
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+import string
+from dvbobjects.MPEG.Section import Section
+from dvbobjects.utils import *
+from dvbobjects.DVB.Descriptors import *
+
+######################################################################
+class bouquet_association_section(Section):
+
+ table_id = 0x4A
+
+ section_max_size = 1024
+
+ def pack_section_body(self):
+
+ # pack bouquet_descriptor_loop
+ bdl_bytes = string.join(
+ map(lambda x: x.pack(),
+ self.bouquet_descriptor_loop),
+ "")
+
+ # pack transport_stream_loop
+ tsl_bytes = string.join(
+ map(lambda x: x.pack(),
+ self.transport_stream_loop),
+ "")
+
+ self.table_id_extension = self.bouquet_id
+ self.private_indicator = 1
+
+ fmt = "!H%dsH%ds" % (len(bdl_bytes), len(tsl_bytes))
+ return pack(fmt,
+ 0xF000 | (len(bdl_bytes) & 0x0FFF),
+ bdl_bytes,
+ 0xF000 | (len(tsl_bytes) & 0x0FFF),
+ tsl_bytes,
+ )
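+
+ # Note (added): ORing with 0xF000 sets the four reserved bits to '1111'
+ # and keeps the low 12 bits for the descriptor-loop length, as in the
+ # DVB SI bouquet_association_section syntax (ETSI EN 300 468).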
+
+######################################################################
+class transport_stream_loop_item(DVBobject):
+
+ def pack(self):
+
+ # pack transport_descriptor_loop
+ tdl_bytes = string.join(
+ map(lambda x: x.pack(),
+ self.transport_descriptor_loop),
+ "")
+
+ fmt = "!HHH%ds" % len(tdl_bytes)
+ return pack(fmt,
+ self.transport_stream_id,
+ self.original_network_id,
+ 0xF000 | (len(tdl_bytes) & 0x0FFF),
+ tdl_bytes,
+ )
+
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+import logging
+from operator import itemgetter
+import os
+
+from openerp import report, tools
+
+_logger = logging.getLogger(__name__)
+
+def graph_get(cr, graph, wkf_ids, nested, workitem, witm_trans, processed_subflows):
+ import pydot
+ cr.execute('select * from wkf_activity where wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
+ nodes = cr.dictfetchall()
+ activities = {}
+ actfrom = {}
+ actto = {}
+ for n in nodes:
+ activities[n['id']] = n
+ if n['subflow_id'] and nested and n['subflow_id'] not in processed_subflows:
+ processed_subflows.add(n['subflow_id']) # don't create multiple times the same cluster.
+ cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
+ wkfinfo = cr.dictfetchone()
+ graph2 = pydot.Cluster('subflow'+str(n['subflow_id']), fontsize='12', label = "\"Subflow: %s\\nOSV: %s\"" % ( n['name'], wkfinfo['osv']) )
+ (s1,s2) = graph_get(cr, graph2, [n['subflow_id']], True, workitem, witm_trans, processed_subflows)
+ graph.add_subgraph(graph2)
+ actfrom[n['id']] = s2
+ actto[n['id']] = s1
+ else:
+ args = {}
+ if n['flow_start'] or n['flow_stop']:
+ args['style']='filled'
+ args['color']='lightgrey'
+ args['label']=n['name']
+ workitems = ''
+ if n['id'] in workitem:
+ workitems = '\\nx ' + str(workitem[n['id']])
+ args['label'] += workitems
+ args['color'] = "red"
+ args['style']='filled'
+ if n['subflow_id']:
+ args['shape'] = 'box'
+ if nested and n['subflow_id'] in processed_subflows:
+ cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
+ wkfinfo = cr.dictfetchone()
+ args['label'] = \
+ '\"Subflow: %s\\nOSV: %s\\n(already expanded)%s\"' % \
+ (n['name'], wkfinfo['osv'], workitems)
+ args['color'] = 'green'
+ args['style'] ='filled'
+ graph.add_node(pydot.Node(n['id'], **args))
+ actfrom[n['id']] = (n['id'],{})
+ actto[n['id']] = (n['id'],{})
+ node_ids = tuple(map(itemgetter('id'), nodes))
+ cr.execute('select * from wkf_transition where act_from IN %s ORDER BY sequence,id', (node_ids,))
+ transitions = cr.dictfetchall()
+ for t in transitions:
+ if not t['act_to'] in activities:
+ continue
+ args = {
+ 'label': str(t['condition']).replace(' or ', '\\nor ')
+ .replace(' and ','\\nand ')
+ }
+ if t['signal']:
+ args['label'] += '\\n'+str(t['signal'])
+ args['style'] = 'bold'
+
+ if activities[t['act_from']]['split_mode']=='AND':
+ args['arrowtail']='box'
+ elif str(activities[t['act_from']]['split_mode'])=='OR ':
+ args['arrowtail']='inv'
+
+ if activities[t['act_to']]['join_mode']=='AND':
+ args['arrowhead']='crow'
+ if t['id'] in witm_trans:
+ args['color'] = 'red'
+
+ activity_from = actfrom[t['act_from']][1].get(t['signal'], actfrom[t['act_from']][0])
+ activity_to = actto[t['act_to']][1].get(t['signal'], actto[t['act_to']][0])
+ graph.add_edge(pydot.Edge( str(activity_from) ,str(activity_to), fontsize='10', **args))
+
+ cr.execute('select * from wkf_activity where flow_start=True and wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
+ start = cr.fetchone()[0]
+ cr.execute("select 'subflow.'||name,id from wkf_activity where flow_stop=True and wkf_id in ("+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
+ stop = cr.fetchall()
+ if stop:
+ stop = (stop[0][1], dict(stop))
+ else:
+ stop = ("stop",{})
+ return (start, {}), stop
+
+
+def graph_instance_get(cr, graph, inst_id, nested=False):
+ cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
+ inst = cr.fetchall()
+
+ def workitem_get(instance):
+ cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (instance,))
+ workitems = dict(cr.fetchall())
+
+ cr.execute('select subflow_id from wkf_workitem where inst_id=%s', (instance,))
+ for (subflow_id,) in cr.fetchall():
+ workitems.update(workitem_get(subflow_id))
+ return workitems
+
+ def witm_get(instance):
+ cr.execute("select trans_id from wkf_witm_trans where inst_id=%s", (instance,))
+ return set(t[0] for t in cr.fetchall())
+
+ processed_subflows = set()
+ graph_get(cr, graph, [x[0] for x in inst], nested, workitem_get(inst_id), witm_get(inst_id), processed_subflows)
+
+#
+# TODO: not clean: concurrent !!!
+#
+
+class report_graph_instance(object):
+ def __init__(self, cr, uid, ids, data):
+ try:
+ import pydot
+ except Exception,e:
+ _logger.warning(
+ 'Import Error for pydot, you will not be able to render workflows.\n'
+ 'Consider Installing PyDot or dependencies: http://dkbza.org/pydot.html.')
+ raise e
+ self.done = False
+
+ try:
+ cr.execute('select * from wkf where osv=%s limit 1',
+ (data['model'],))
+ wkfinfo = cr.dictfetchone()
+ if not wkfinfo:
+ ps_string = '''%PS-Adobe-3.0
+/inch {72 mul} def
+/Times-Roman findfont 50 scalefont setfont
+1.5 inch 15 inch moveto
+(No workflow defined) show
+showpage'''
+ else:
+ cr.execute('select i.id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where res_id=%s and osv=%s',(data['id'],data['model']))
+ inst_ids = cr.fetchall()
+ if not inst_ids:
+ ps_string = '''%PS-Adobe-3.0
+/inch {72 mul} def
+/Times-Roman findfont 50 scalefont setfont
+1.5 inch 15 inch moveto
+(No workflow instance defined) show
+showpage'''
+ else:
+ graph = pydot.Dot(graph_name=data['model'].replace('.','_'),
+ fontsize='16',
+ label="""\\\n\\nWorkflow: %s\\n OSV: %s""" % (wkfinfo['name'],wkfinfo['osv']),
+ size='7.3, 10.1', center='1', ratio='auto', rotate='0', rankdir='TB',
+ )
+ for inst_id in inst_ids:
+ inst_id = inst_id[0]
+ graph_instance_get(cr, graph, inst_id, data.get('nested', False))
+ ps_string = graph.create(prog='dot', format='ps')
+ except Exception:
+ _logger.exception('Exception in call:')
+ # string is in PS, like the success message would have been
+ ps_string = '''%PS-Adobe-3.0
+/inch {72 mul} def
+/Times-Roman findfont 50 scalefont setfont
+1.5 inch 15 inch moveto
+(No workflow available) show
+showpage'''
+ if os.name == "nt":
+ prog = 'ps2pdf.bat'
+ else:
+ prog = 'ps2pdf'
+ args = (prog, '-', '-')
+ input, output = tools.exec_command_pipe(*args)
+ input.write(ps_string)
+ input.close()
+ self.result = output.read()
+ output.close()
+ self.done = True
+
+ def is_done(self):
+ return self.done
+
+ def get(self):
+ if self.done:
+ return self.result
+ else:
+ return None
+
+class report_graph(report.interface.report_int):
+ def __init__(self, name, table):
+ report.interface.report_int.__init__(self, name)
+ self.table = table
+
+ def result(self):
+ if self.obj.is_done():
+ return True, self.obj.get(), 'pdf'
+ else:
+ return False, False, False
+
+ def create(self, cr, uid, ids, data, context=None):
+ self.obj = report_graph_instance(cr, uid, ids, data)
+ return self.obj.get(), 'pdf'
+
+report_graph('report.workflow.instance.graph', 'ir.workflow')
+
+#!/usr/bin/env python
+#Copyright (C) 2009-2010 :
+# Gabes Jean, naparuba@gmail.com
+# Gerhard Lausser, Gerhard.Lausser@consol.de
+# Gregory Starck, g.starck@gmail.com
+# Hartmut Goebel, h.goebel@goebel-consult.de
+#
+#This file is part of Shinken.
+#
+#Shinken is free software: you can redistribute it and/or modify
+#it under the terms of the GNU Affero General Public License as published by
+#the Free Software Foundation, either version 3 of the License, or
+#(at your option) any later version.
+#
+#Shinken is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU Affero General Public License for more details.
+#
+#You should have received a copy of the GNU Affero General Public License
+#along with Shinken. If not, see .
+
+#The resultmodulation class is used for in-scheduler modulation of results
+#like the return code or the output.
+
+import time
+
+from item import Item, Items
+
+from shinken.property import StringProp, IntegerProp
+
+class Businessimpactmodulation(Item):
+ id = 1  # 0 is always special in database, so we do not take risk here
+ my_type = 'businessimpactmodulation'
+
+ properties = Item.properties.copy()
+ properties.update({
+ 'business_impact_modulation_name': StringProp(),
+ 'business_impact': IntegerProp(),
+ 'modulation_period': StringProp(default=None),
+ })
+
+
+ # For debugging purpose only (nice name)
+ def get_name(self):
+ return self.business_impact_modulation_name
+
+
+
+class Businessimpactmodulations(Items):
+ name_property = "business_impact_modulation_name"
+ inner_class = Businessimpactmodulation
+
+
+ def linkify(self, timeperiods):
+ self.linkify_cm_by_tp(timeperiods)
+
+
+ # We just search for each timeperiod the tp
+ # and replace the name by the tp
+ def linkify_cm_by_tp(self, timeperiods):
+ for rm in self:
+ mtp_name = rm.modulation_period.strip()
+
+ # The new member list, in id
+ mtp = timeperiods.find_by_name(mtp_name)
+
+ if mtp_name != '' and mtp is None:
+ err = "Error : the business impact modulation '%s' got an unknown modulation_period '%s'" % (rm.get_name(), mtp_name)
+ rm.configuration_errors.append(err)
+
+ rm.modulation_period = mtp
+
+"""
+ Installation script for accelerated upgrade
+"""
+import codecs
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+from version import get_version
+
+import sys
+
+DESCRIPTION = 'Description'
+with codecs.open('README.md', 'r', encoding='UTF-8') as readme:
+ LONG_DESCRIPTION = ''.join(readme)
+
+CLASSIFIERS = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.7',
+ 'Intended Audience :: Developers',
+ 'Natural Language :: English',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+]
+
+packages = [
+ 'vertest',
+]
+
+NAME = 'vertest'
+
+# setup_args["version"] = "1.0.0"
+
+setup_args = dict(
+
+ name=NAME,
+ version="1.0.0",
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ author='Klaudiusz Staniek',
+ author_email='klstanie [at] cisco.com',
+ url='https://github.com/kstaniek/version',
+ download_url='https://github.com/kstaniek/version/tarball/{}'.format(get_version()),
+ keywords='version test',
+ tests_require=['tox'],
+ platforms=['any'],
+ packages=packages,
+ package_data={'': ['LICENSE', ], },
+ package_dir={'vertest': 'vertest'},
+ include_package_data=True,
+ install_requires=[],
+ data_files=[],
+ license='Apache 2.0',
+ classifiers=CLASSIFIERS,
+ zip_safe=False
+)
+
+
+if __name__=="__main__":
+ if 'upload' in sys.argv:
+ import package
+ package.__version__.verify(setup_args['version'])
+ setup(**setup_args)
+"""
+If you know what an abstract syntax tree (AST) is, you'll see that this module
+is pretty much that. The classes represent syntax elements like functions and
+imports.
+
+This is the "business logic" part of the parser. There's a lot of logic here
+that makes it easier for Jedi (and other libraries to deal with a Python syntax
+tree.
+
+By using `get_code` on a module, you can get back the 1-to-1 representation of
+the input given to the parser. This is important if you are using refactoring.
+
+The easiest way to play with this module is to use :class:`parsing.Parser`.
+:attr:`parsing.Parser.module` holds an instance of :class:`Module`:
+
+>>> from jedi._compatibility import u
+>>> from jedi.parser import Parser, load_grammar
+>>> parser = Parser(load_grammar(), u('import os'), 'example.py')
+>>> submodule = parser.module
+>>> submodule
+
+
+Any subclasses of :class:`Scope`, including :class:`Module` has an attribute
+:attr:`imports <Scope.imports>`:
+
+>>> submodule.imports
+[]
+
+See also :attr:`Scope.subscopes` and :attr:`Scope.statements`.
+"""
+import os
+import re
+from inspect import cleandoc
+from itertools import chain
+import textwrap
+
+from jedi._compatibility import (Python3Method, encoding, is_py3, utf8_repr,
+ literal_eval, use_metaclass, unicode)
+from jedi import cache
+
+
+def is_node(node, *symbol_names):
+ try:
+ type = node.type
+ except AttributeError:
+ return False
+ else:
+ return type in symbol_names
+
+
+class PositionModifier(object):
+ """A start_pos modifier for the fast parser."""
+ def __init__(self):
+ self.line = 0
+
+
+zero_position_modifier = PositionModifier()
+
+
+class DocstringMixin(object):
+ __slots__ = ()
+
+ @property
+ def raw_doc(self):
+ """ Returns a cleaned version of the docstring token. """
+ if isinstance(self, Module):
+ node = self.children[0]
+ elif isinstance(self, ClassOrFunc):
+ node = self.children[self.children.index(':') + 1]
+ if is_node(node, 'suite'): # Normally a suite
+ node = node.children[2] # -> NEWLINE INDENT stmt
+ else: # ExprStmt
+ simple_stmt = self.parent
+ c = simple_stmt.parent.children
+ index = c.index(simple_stmt)
+ if not index:
+ return ''
+ node = c[index - 1]
+
+ if is_node(node, 'simple_stmt'):
+ node = node.children[0]
+
+ if node.type == 'string':
+ # TODO We have to check next leaves until there are no new
+ # leaves anymore that might be part of the docstring. A
+ # docstring can also look like this: ``'foo' 'bar'``.
+ # Returns a literal cleaned version of the ``Token``.
+ cleaned = cleandoc(literal_eval(node.value))
+ # Since we want the docstr output to be always unicode, just
+ # force it.
+ if is_py3 or isinstance(cleaned, unicode):
+ return cleaned
+ else:
+ return unicode(cleaned, 'UTF-8', 'replace')
+ return ''
+
+
+class Base(object):
+ """
+ This is just here to have an isinstance check, which is also used on
+ evaluate classes. But since they have sometimes a special type of
+ delegation, it is important for those classes to override this method.
+
+ I know that there is a chance to do such things with __instancecheck__, but
+ since Python 2.5 doesn't support it, I decided to do it this way.
+ """
+ __slots__ = ()
+
+ def isinstance(self, *cls):
+ return isinstance(self, cls)
+
+ @Python3Method
+ def get_parent_until(self, classes=(), reverse=False,
+ include_current=True):
+ """
+ Searches the parent "chain" until the object is an instance of
+ classes. If classes is empty return the last parent in the chain
+ (is without a parent).
+ """
+ if type(classes) not in (tuple, list):
+ classes = (classes,)
+ scope = self if include_current else self.parent
+ while scope.parent is not None:
+ # TODO why if classes?
+ if classes and reverse != scope.isinstance(*classes):
+ break
+ scope = scope.parent
+ return scope
+
+ def get_parent_scope(self, include_flows=False):
+ """
+ Returns the underlying scope.
+ """
+ scope = self.parent
+ while scope is not None:
+ if include_flows and isinstance(scope, Flow):
+ return scope
+ if scope.is_scope():
+ break
+ scope = scope.parent
+ return scope
+
+ def is_scope(self):
+ # Default is not being a scope. Just inherit from Scope.
+ return False
+
+
+class Leaf(Base):
+ __slots__ = ('position_modifier', 'value', 'parent', '_start_pos', 'prefix')
+
+ def __init__(self, position_modifier, value, start_pos, prefix=''):
+ self.position_modifier = position_modifier
+ self.value = value
+ self._start_pos = start_pos
+ self.prefix = prefix
+ self.parent = None
+
+ @property
+ def start_pos(self):
+ return self._start_pos[0] + self.position_modifier.line, self._start_pos[1]
+
+ @start_pos.setter
+ def start_pos(self, value):
+ self._start_pos = value[0] - self.position_modifier.line, value[1]
+
+ @property
+ def end_pos(self):
+ return (self._start_pos[0] + self.position_modifier.line,
+ self._start_pos[1] + len(self.value))
+
+ def move(self, line_offset, column_offset):
+ self._start_pos = (self._start_pos[0] + line_offset,
+ self._start_pos[1] + column_offset)
+
+ def get_previous(self):
+ """
+ Returns the previous leaf in the parser tree.
+ """
+ node = self
+ while True:
+ c = node.parent.children
+ i = c.index(self)
+ if i == 0:
+ node = node.parent
+ if node.parent is None:
+ raise IndexError('Cannot access the previous element of the first one.')
+ else:
+ node = c[i - 1]
+ break
+
+ while True:
+ try:
+ node = node.children[-1]
+ except AttributeError: # A Leaf doesn't have children.
+ return node
+
+ def get_code(self):
+ return self.prefix + self.value
+
+ def next_sibling(self):
+ """
+ The node immediately following the invocant in their parent's children
+ list. If the invocant does not have a next sibling, it is None
+ """
+ # Can't use index(); we need to test by identity
+ for i, child in enumerate(self.parent.children):
+ if child is self:
+ try:
+ return self.parent.children[i + 1]
+ except IndexError:
+ return None
+
+ def prev_sibling(self):
+ """
+ The node/leaf immediately preceding the invocant in their parent's
+ children list. If the invocant does not have a previous sibling, it is
+ None.
+ """
+ # Can't use index(); we need to test by identity
+ for i, child in enumerate(self.parent.children):
+ if child is self:
+ if i == 0:
+ return None
+ return self.parent.children[i - 1]
+
+ @utf8_repr
+ def __repr__(self):
+ return "<%s: %s>" % (type(self).__name__, self.value)
+
+
+class LeafWithNewLines(Leaf):
+ __slots__ = ()
+
+ @property
+ def end_pos(self):
+ """
+ Literals and whitespace end_pos are more complicated than normal
+ end_pos, because the containing newlines may change the indexes.
+ """
+ end_pos_line, end_pos_col = self.start_pos
+ lines = self.value.split('\n')
+ end_pos_line += len(lines) - 1
+ # Check for multiline token
+ if self.start_pos[0] == end_pos_line:
+ end_pos_col += len(lines[-1])
+ else:
+ end_pos_col = len(lines[-1])
+ return end_pos_line, end_pos_col
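+
+ # Example (added): a value of 'foo\nbar' with start_pos (1, 4) yields
+ # end_pos (2, 3): the line advances by len(lines) - 1 and the column
+ # restarts from zero after a newline.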
+
+
+ @utf8_repr
+ def __repr__(self):
+ return "<%s: %r>" % (type(self).__name__, self.value)
+
+class Whitespace(LeafWithNewLines):
+ """Contains NEWLINE and ENDMARKER tokens."""
+ __slots__ = ()
+ type = 'whitespace'
+
+
+class Name(Leaf):
+ """
+ A string. Sometimes it is important to know if the string belongs to a name
+ or not.
+ """
+ type = 'name'
+ __slots__ = ()
+
+ def __str__(self):
+ return self.value
+
+ def __unicode__(self):
+ return self.value
+
+ def __repr__(self):
+ return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
+ self.start_pos[0], self.start_pos[1])
+
+ def get_definition(self):
+ scope = self
+ while scope.parent is not None:
+ parent = scope.parent
+ if scope.isinstance(Node, Name) and parent.type != 'simple_stmt':
+ if scope.type == 'testlist_comp':
+ try:
+ if isinstance(scope.children[1], CompFor):
+ return scope.children[1]
+ except IndexError:
+ pass
+ scope = parent
+ else:
+ break
+ return scope
+
+ def is_definition(self):
+ stmt = self.get_definition()
+ if stmt.type in ('funcdef', 'classdef', 'file_input', 'param'):
+ return self == stmt.name
+ elif stmt.type == 'for_stmt':
+ return self.start_pos < stmt.children[2].start_pos
+ elif stmt.type == 'try_stmt':
+ return self.prev_sibling() == 'as'
+ else:
+ return stmt.type in ('expr_stmt', 'import_name', 'import_from',
+ 'comp_for', 'with_stmt') \
+ and self in stmt.get_defined_names()
+
+ def assignment_indexes(self):
+ """
+ Returns an array of ints of the indexes that are used in tuple
+ assignments.
+
+ For example if the name is ``y`` in the following code::
+
+ x, (y, z) = 2, ''
+
+ would result in ``[1, 0]``.
+ """
+ indexes = []
+ node = self.parent
+ compare = self
+ while node is not None:
+ if is_node(node, 'testlist_comp', 'testlist_star_expr', 'exprlist'):
+ for i, child in enumerate(node.children):
+ if child == compare:
+ indexes.insert(0, int(i / 2))
+ break
+ else:
+ raise LookupError("Couldn't find the assignment.")
+ elif isinstance(node, (ExprStmt, CompFor)):
+ break
+
+ compare = node
+ node = node.parent
+ return indexes
+
+
+class Literal(LeafWithNewLines):
+ __slots__ = ()
+
+ def eval(self):
+ return literal_eval(self.value)
+
+
+class Number(Literal):
+ type = 'number'
+ __slots__ = ()
+
+
+class String(Literal):
+ type = 'string'
+ __slots__ = ()
+
+
+class Operator(Leaf):
+ type = 'operator'
+ __slots__ = ()
+
+ def __str__(self):
+ return self.value
+
+ def __eq__(self, other):
+ """
+ Make comparisons with strings easy.
+ Improves the readability of the parser.
+ """
+ if isinstance(other, Operator):
+ return self is other
+ else:
+ return self.value == other
+
+ def __ne__(self, other):
+ """Python 2 compatibility."""
+ return self.value != other
+
+ def __hash__(self):
+ return hash(self.value)
+
+
+class Keyword(Leaf):
+ type = 'keyword'
+ __slots__ = ()
+
+ def __eq__(self, other):
+ """
+ Make comparisons with strings easy.
+ Improves the readability of the parser.
+ """
+ if isinstance(other, Keyword):
+ return self is other
+ return self.value == other
+
+ def __ne__(self, other):
+ """Python 2 compatibility."""
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash(self.value)
+
+
+class BaseNode(Base):
+ """
+ The super class for Scope, Import, Name and Statement. Every object in
+ the parser tree inherits from this class.
+ """
+ __slots__ = ('children', 'parent')
+ type = None
+
+ def __init__(self, children):
+ """
+ Initialize :class:`BaseNode`.
+
+ :param children: The module in which this Python object locates.
+ """
+ for c in children:
+ c.parent = self
+ self.children = children
+ self.parent = None
+
+ def move(self, line_offset, column_offset):
+ """
+ Move the Node's start_pos.
+ """
+ for c in self.children:
+ c.move(line_offset, column_offset)
+
+ @property
+ def start_pos(self):
+ return self.children[0].start_pos
+
+ @property
+ def end_pos(self):
+ return self.children[-1].end_pos
+
+ def get_code(self):
+ return "".join(c.get_code() for c in self.children)
+
+ @Python3Method
+ def name_for_position(self, position):
+ for c in self.children:
+ if isinstance(c, Leaf):
+ if isinstance(c, Name) and c.start_pos <= position <= c.end_pos:
+ return c
+ else:
+ result = c.name_for_position(position)
+ if result is not None:
+ return result
+ return None
+
+ @Python3Method
+ def get_statement_for_position(self, pos):
+ for c in self.children:
+ if c.start_pos <= pos <= c.end_pos:
+ if c.type not in ('decorated', 'simple_stmt', 'suite') \
+ and not isinstance(c, (Flow, ClassOrFunc)):
+ return c
+ else:
+ try:
+ return c.get_statement_for_position(pos)
+ except AttributeError:
+ pass # Must be a non-scope
+ return None
+
+ def first_leaf(self):
+ try:
+ return self.children[0].first_leaf()
+ except AttributeError:
+ return self.children[0]
+
+ @utf8_repr
+ def __repr__(self):
+ code = self.get_code().replace('\n', ' ')
+ if not is_py3:
+ code = code.encode(encoding, 'replace')
+ return "<%s: %s@%s,%s>" % \
+ (type(self).__name__, code, self.start_pos[0], self.start_pos[1])
+
+
+class Node(BaseNode):
+ """Concrete implementation for interior nodes."""
+ __slots__ = ('type',)
+
+ def __init__(self, type, children):
+ """
+ Initializer.
+
+ Takes a type constant (a symbol number >= 256), a sequence of
+ child nodes, and an optional context keyword argument.
+
+ As a side effect, the parent pointers of the children are updated.
+ """
+ super(Node, self).__init__(children)
+ self.type = type
+
+ def __repr__(self):
+ return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children)
+
+
+class IsScopeMeta(type):
+ def __instancecheck__(self, other):
+ return other.is_scope()
+
+
+class IsScope(use_metaclass(IsScopeMeta)):
+ pass
+
+
+class Scope(BaseNode, DocstringMixin):
+ """
+ Super class for the parser tree, which represents the state of a python
+ text file.
+ A Scope manages and owns its subscopes, which are classes and functions, as
+ well as variables and imports. It is used to access the structure of python
+ files.
+
+ :param start_pos: The position (line and column) of the scope.
+ :type start_pos: tuple(int, int)
+ """
+ __slots__ = ('names_dict',)
+
+ def __init__(self, children):
+ super(Scope, self).__init__(children)
+
+ @property
+ def returns(self):
+ # Needed here for fast_parser, because the fast_parser splits and
+ # returns will be in "normal" modules.
+ return self._search_in_scope(ReturnStmt)
+
+ @property
+ def subscopes(self):
+ return self._search_in_scope(Scope)
+
+ @property
+ def flows(self):
+ return self._search_in_scope(Flow)
+
+ @property
+ def imports(self):
+ return self._search_in_scope(Import)
+
+ @Python3Method
+ def _search_in_scope(self, typ):
+ def scan(children):
+ elements = []
+ for element in children:
+ if isinstance(element, typ):
+ elements.append(element)
+ if is_node(element, 'suite', 'simple_stmt', 'decorated') \
+ or isinstance(element, Flow):
+ elements += scan(element.children)
+ return elements
+
+ return scan(self.children)
+
+ @property
+ def statements(self):
+ return self._search_in_scope((ExprStmt, KeywordStatement))
+
+ def is_scope(self):
+ return True
+
+ def __repr__(self):
+ try:
+ name = self.path
+ except AttributeError:
+ try:
+ name = self.name
+ except AttributeError:
+ name = self.command
+
+ return "<%s: %s@%s-%s>" % (type(self).__name__, name,
+ self.start_pos[0], self.end_pos[0])
+
+ def walk(self):
+ yield self
+ for s in self.subscopes:
+ for scope in s.walk():
+ yield scope
+
+ for r in self.statements:
+ while isinstance(r, Flow):
+ for scope in r.walk():
+ yield scope
+ r = r.next
+
+
+class Module(Scope):
+ """
+ The top scope, which is always a module.
+ Depending on the underlying parser this may be a full module or just a part
+ of a module.
+ """
+ __slots__ = ('path', 'global_names', 'used_names', '_name',
+ 'error_statement_stacks')
+ type = 'file_input'
+
+ def __init__(self, children):
+ """
+ Initialize :class:`Module`.
+
+ :type path: str
+ :arg path: File path to this module.
+
+ .. todo:: Document `top_module`.
+ """
+ super(Module, self).__init__(children)
+ self.path = None # Set later.
+
+ @property
+ @cache.underscore_memoization
+ def name(self):
+ """ This is used for the goto functions. """
+ if self.path is None:
+ string = '' # no path -> empty name
+ else:
+ sep = (re.escape(os.path.sep),) * 2
+ r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path)
+ # Remove PEP 3149 names
+ string = re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
+ # Positions are not real, but a module starts at (1, 0)
+ p = (1, 0)
+ name = Name(zero_position_modifier, string, p)
+ name.parent = self
+ return name
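+
+ # Examples (added): a path of '/pkg/__init__.py' produces the name 'pkg',
+ # and a PEP 3149 filename such as 'foo.cpython-33m.so' is reduced to
+ # 'foo' by the re.sub() above.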
+
+ @property
+ def has_explicit_absolute_import(self):
+ """
+ Checks if imports in this module are explicitly absolute, i.e. there
+ is a ``__future__`` import.
+ """
+ # TODO this is a strange scan and not fully correct. I think Python's
+ # parser does it in a different way and scans for the first
+ # statement/import with a tokenizer (to check for syntax changes like
+ # the future print statement).
+ for imp in self.imports:
+ if imp.type == 'import_from' and imp.level == 0:
+ for path in imp.paths():
+ if [str(name) for name in path] == ['__future__', 'absolute_import']:
+ return True
+ return False
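+
+ # Example (added): a module whose first statement is
+ # ``from __future__ import absolute_import`` makes this property True,
+ # while a plain ``import os`` leaves it False.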
+
+
+class Decorator(BaseNode):
+ type = 'decorator'
+ __slots__ = ()
+
+
+class ClassOrFunc(Scope):
+ __slots__ = ()
+
+ @property
+ def name(self):
+ return self.children[1]
+
+ def get_decorators(self):
+ decorated = self.parent
+ if is_node(decorated, 'decorated'):
+ if is_node(decorated.children[0], 'decorators'):
+ return decorated.children[0].children
+ else:
+ return decorated.children[:1]
+ else:
+ return []
+
+
+class Class(ClassOrFunc):
+ """
+ Used to store the parsed contents of a python class.
+
+ :param name: The Class name.
+ :type name: str
+ :param supers: The super classes of a Class.
+ :type supers: list
+ :param start_pos: The start position (line, column) of the class.
+ :type start_pos: tuple(int, int)
+ """
+ type = 'classdef'
+ __slots__ = ()
+
+ def __init__(self, children):
+ super(Class, self).__init__(children)
+
+ def get_super_arglist(self):
+ if self.children[2] != '(': # Has no parentheses
+ return None
+ else:
+ if self.children[3] == ')': # Empty parentheses
+ return None
+ else:
+ return self.children[3]
+
+ @property
+ def doc(self):
+ """
+ Return a document string including call signature of __init__.
+ """
+ docstr = self.raw_doc
+ for sub in self.subscopes:
+ if str(sub.name) == '__init__':
+ return '%s\n\n%s' % (
+ sub.get_call_signature(func_name=self.name), docstr)
+ return docstr
+
+
+def _create_params(parent, argslist_list):
+ """
+ `argslist_list` is a list that can contain an argslist as a first item, but
+ may not. It's basically the items between the parameter brackets (which is
+ at most one item).
+ This function modifies the parser structure. It generates `Param` objects
+ from the normal ast. Those param objects do not exist in a normal ast, but
+ make the evaluation of the ast tree so much easier.
+ You could also say that this function replaces the argslist node with a
+ list of Param objects.
+ """
+ def check_python2_nested_param(node):
+ """
+ Python 2 allows params to look like ``def x(a, (b, c))``, which is
+ basically a way of unpacking tuples in params. Python 3 has ditched
+ this behavior. Jedi currently just ignores those constructs.
+ """
+ return node.type == 'tfpdef' and node.children[0] == '('
+
+ try:
+ first = argslist_list[0]
+ except IndexError:
+ return []
+
+ if first.type in ('name', 'tfpdef'):
+ if check_python2_nested_param(first):
+ return []
+ else:
+ return [Param([first], parent)]
+ else: # argslist is a `typedargslist` or a `varargslist`.
+ children = first.children
+ params = []
+ start = 0
+ # Enumerate from 1, so that `end` works as an exclusive slice index.
+ for end, child in enumerate(children + [None], 1):
+ if child is None or child == ',':
+ new_children = children[start:end]
+ if new_children: # Could as well be comma and then end.
+ if check_python2_nested_param(new_children[0]):
+ continue
+ params.append(Param(new_children, parent))
+ start = end
+ return params
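+
+ # The comma-splitting idiom above, shown standalone on plain strings (a
+ # minimal sketch of the same technique; note that a trailing comma stays
+ # attached to the preceding group, just as it does in `Param` children):
+ #
+ # >>> children = ['a', ',', 'b', '=', '3']
+ # >>> groups, start = [], 0
+ # >>> for end, child in enumerate(children + [None], 1):
+ # ...     if child is None or child == ',':
+ # ...         if children[start:end]:
+ # ...             groups.append(children[start:end])
+ # ...         start = end
+ # >>> groups
+ # [['a', ','], ['b', '=', '3']]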
+
+
+class Function(ClassOrFunc):
+ """
+ Used to store the parsed contents of a Python function.
+ """
+ __slots__ = ('listeners',)
+ type = 'funcdef'
+
+ def __init__(self, children):
+ super(Function, self).__init__(children)
+ self.listeners = set() # not used here, but in evaluation.
+ parameters = self.children[2] # After `def foo`
+ parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1])
+
+ @property
+ def params(self):
+ return self.children[2].children[1:-1]
+
+ @property
+ def name(self):
+ return self.children[1] # First token after `def`
+
+ @property
+ def yields(self):
+ # TODO This is incorrect: yield expressions can also appear inside statements.
+ return self._search_in_scope(YieldExpr)
+
+ def is_generator(self):
+ return bool(self.yields)
+
+ def annotation(self):
+ # `funcdef: 'def' NAME parameters ['->' test] ':' suite`; the return
+ # annotation, if present, is the node right after the '->' operator.
+ try:
+ if self.children[3] == '->':
+ return self.children[4]
+ return None
+ except IndexError:
+ return None
+
+ def get_call_signature(self, width=72, func_name=None):
+ """
+ Generate call signature of this function.
+
+ :param width: Fold lines if a line is longer than this value.
+ :type width: int
+ :param func_name: Override function name when given.
+ :type func_name: str
+
+ :rtype: str
+ """
+ func_name = func_name or self.children[1]
+ code = unicode(func_name) + self.children[2].get_code()
+ return '\n'.join(textwrap.wrap(code, width))
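+
+ # The folding relies on the stdlib's ``textwrap.wrap``; standalone:
+ #
+ # >>> import textwrap
+ # >>> textwrap.wrap('foo(aaaa, bbbb, cccc, dddd)', 15)
+ # ['foo(aaaa, bbbb,', 'cccc, dddd)']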
+
+ @property
+ def doc(self):
+ """ Return a document string including call signature. """
+ docstr = self.raw_doc
+ return '%s\n\n%s' % (self.get_call_signature(), docstr)
+
+
+class Lambda(Function):
+ """
+ Lambdas are basically trimmed functions, so give them the same interface.
+ """
+ type = 'lambda'
+ __slots__ = ()
+
+ def __init__(self, children):
+ # We don't want to call the Function constructor, call its parent.
+ super(Function, self).__init__(children)
+ self.listeners = set() # not used here, but in evaluation.
+ lst = self.children[1:-2] # Params between `lambda` and the `:` operator.
+ self.children[1:-2] = _create_params(self, lst)
+
+ @property
+ def params(self):
+ return self.children[1:-2]
+
+ def is_generator(self):
+ return False
+
+ def yields(self):
+ return []
+
+ def __repr__(self):
+ return "<%s@%s>" % (self.__class__.__name__, self.start_pos)
+
+
+class Flow(BaseNode):
+ __slots__ = ()
+
+
+class IfStmt(Flow):
+ type = 'if_stmt'
+ __slots__ = ()
+
+ def check_nodes(self):
+ """
+ Returns all the `test` nodes, i.e. the `x` of each branch here:
+
+ if x:
+ pass
+ elif x:
+ pass
+ """
+ for i, c in enumerate(self.children):
+ if c in ('elif', 'if'):
+ yield self.children[i + 1]
+
+ def node_in_which_check_node(self, node):
+ for check_node in reversed(list(self.check_nodes())):
+ if check_node.start_pos < node.start_pos:
+ return check_node
+
+ def node_after_else(self, node):
+ """
+ Checks if a node is defined after `else`.
+ """
+ for c in self.children:
+ if c == 'else':
+ if node.start_pos > c.start_pos:
+ return True
+ else:
+ return False
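+
+ # Node-layout sketch: for ``if x: ... elif y: ... else: ...`` the children
+ # are ['if', x, ':', suite, 'elif', y, ':', suite, 'else', ':', suite], so
+ # ``check_nodes()`` yields the ``x`` and ``y`` test nodes in source order.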
+
+
+class WhileStmt(Flow):
+ type = 'while_stmt'
+ __slots__ = ()
+
+
+class ForStmt(Flow):
+ type = 'for_stmt'
+ __slots__ = ()
+
+
+class TryStmt(Flow):
+ type = 'try_stmt'
+ __slots__ = ()
+
+ def except_clauses(self):
+ """
+ Yields the ``test`` nodes found in ``except_clause`` nodes.
+ Yields ``None`` for a bare ``except`` clause without an exception.
+ """
+ for node in self.children:
+ if node.type == 'except_clause':
+ yield node.children[1]
+ elif node == 'except':
+ yield None
+
+
+class WithStmt(Flow):
+ type = 'with_stmt'
+ __slots__ = ()
+
+ def get_defined_names(self):
+ names = []
+ for with_item in self.children[1:-2:2]:
+ # Check with items for 'as' names.
+ if is_node(with_item, 'with_item'):
+ names += _defined_names(with_item.children[2])
+ return names
+
+ def node_from_name(self, name):
+ node = name
+ while True:
+ node = node.parent
+ if is_node(node, 'with_item'):
+ return node.children[0]
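+
+ # With-statement sketch: in ``with open(p) as f: ...`` the 'with_item'
+ # children are [open(p), 'as', f], so ``get_defined_names()`` returns
+ # ``[f]`` and ``node_from_name(f)`` climbs back up to return the
+ # ``open(p)`` context expression.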
+
+
+class Import(BaseNode):
+ __slots__ = ()
+
+ def path_for_name(self, name):
+ try:
+ # The name may be an alias. If it is, just map it back to the name.
+ name = self.aliases()[name]
+ except KeyError:
+ pass
+
+ for path in self.paths():
+ if name in path:
+ return path[:path.index(name) + 1]
+ raise ValueError('Name should be defined in the import itself')
+
+ def is_nested(self):
+ return False # By default; subclasses may override this behavior.
+
+ def is_star_import(self):
+ return self.children[-1] == '*'
+
+
+class ImportFrom(Import):
+ type = 'import_from'
+ __slots__ = ()
+
+ def get_defined_names(self):
+ return [alias or name for name, alias in self._as_name_tuples()]
+
+ def aliases(self):
+ """Mapping from alias to its corresponding name."""
+ return dict((alias, name) for name, alias in self._as_name_tuples()
+ if alias is not None)
+
+ def get_from_names(self):
+ for n in self.children[1:]:
+ if n not in ('.', '...'):
+ break
+ if is_node(n, 'dotted_name'): # from x.y import
+ return n.children[::2]
+ elif n == 'import': # from . import
+ return []
+ else: # from x import
+ return [n]
+
+ @property
+ def level(self):
+ """The level parameter of ``__import__``."""
+ level = 0
+ for n in self.children[1:]:
+ if n in ('.', '...'):
+ level += len(n.value)
+ else:
+ break
+ return level
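+
+ # Level-counting sketch: ``from . import x`` has level 1, while
+ # ``from ...pkg import x`` has level 3, because each leading dot token
+ # contributes ``len(n.value)`` and a ``...`` token has a value of length 3.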
+
+ def _as_name_tuples(self):
+ last = self.children[-1]
+ if last == ')':
+ last = self.children[-2]
+ elif last == '*':
+ return # No names defined directly.
+
+ if is_node(last, 'import_as_names'):
+ as_names = last.children[::2]
+ else:
+ as_names = [last]
+ for as_name in as_names:
+ if as_name.type == 'name':
+ yield as_name, None
+ else:
+ yield as_name.children[::2] # yields x, y -> ``x as y``
+
+ def star_import_name(self):
+ """
+ The last name defined in a star import.
+ """
+ return self.paths()[-1][-1]
+
+ def paths(self):
+ """
+ The import paths defined in an import statement. Typically an array
+ like this: ``[<Name: datetime>, <Name: date>]``.
+ """
+ dotted = self.get_from_names()
+
+ if self.children[-1] == '*':
+ return [dotted]
+ return [dotted + [name] for name, alias in self._as_name_tuples()]
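+
+ # Path sketch: for ``from x.y import a, b as c`` the method above returns
+ # ``[[x, y, a], [x, y, b]]``; the alias ``c`` is not part of the path, and
+ # ``aliases()`` maps it back to ``b``.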
+
+
+class ImportName(Import):
+ """For ``import_name`` nodes. Covers normal imports without ``from``."""
+ type = 'import_name'
+ __slots__ = ()
+
+ def get_defined_names(self):
+ return [alias or path[0] for path, alias in self._dotted_as_names()]
+
+ @property
+ def level(self):
+ """The level parameter of ``__import__``."""
+ return 0 # Obviously 0 for imports without from.
+
+ def paths(self):
+ return [path for path, alias in self._dotted_as_names()]
+
+ def _dotted_as_names(self):
+ """Generator of (list(path), alias) where alias may be None."""
+ dotted_as_names = self.children[1]
+ if is_node(dotted_as_names, 'dotted_as_names'):
+ as_names = dotted_as_names.children[::2]
+ else:
+ as_names = [dotted_as_names]
+
+ for as_name in as_names:
+ if is_node(as_name, 'dotted_as_name'):
+ alias = as_name.children[2]
+ as_name = as_name.children[0]
+ else:
+ alias = None
+ if as_name.type == 'name':
+ yield [as_name], alias
+ else:
+ # dotted_names
+ yield as_name.children[::2], alias
+
+ def is_nested(self):
+ """
+ This checks for the special case of nested imports, without aliases and
+ without a ``from`` statement::
+
+ import foo.bar
+ """
+ return [1 for path, alias in self._dotted_as_names()
+ if alias is None and len(path) > 1]
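+
+ # Nesting sketch: ``import foo.bar`` yields ``([foo, bar], None)`` from
+ # ``_dotted_as_names()``, so ``is_nested()`` is truthy; ``import foo`` and
+ # the aliased ``import foo.bar as baz`` are not considered nested.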
+
+ def aliases(self):
+ return dict((alias, path[-1]) for path, alias in self._dotted_as_names()
+ if alias is not None)
+
+
+class KeywordStatement(BaseNode):
+ """
+ For the following statements: `assert`, `del`, `global`, `nonlocal`,
+ `raise`, `return`, `yield`, `pass`, `continue` and `break`.
+ """
+ __slots__ = ()
+
+ @property
+ def keyword(self):
+ return self.children[0].value
+
+
+class AssertStmt(KeywordStatement):
+ type = 'assert_stmt'
+ __slots__ = ()
+
+ def assertion(self):
+ return self.children[1]
+
+
+class GlobalStmt(KeywordStatement):
+ type = 'global_stmt'
+ __slots__ = ()
+
+ def get_defined_names(self):
+ return []
+
+ def get_global_names(self):
+ return self.children[1::2]
+
+
+class ReturnStmt(KeywordStatement):
+ type = 'return_stmt'
+ __slots__ = ()
+
+
+class YieldExpr(BaseNode):
+ type = 'yield_expr'
+ __slots__ = ()
+
+
+def _defined_names(current):
+ """
+ A helper function to find the defined names in statements, for loops and
+ list comprehensions.
+ """
+ names = []
+ if is_node(current, 'testlist_star_expr', 'testlist_comp', 'exprlist'):
+ for child in current.children[::2]:
+ names += _defined_names(child)
+ elif is_node(current, 'atom'):
+ names += _defined_names(current.children[1])
+ elif is_node(current, 'power'):
+ if current.children[-2] != '**': # Only if there's no `**` operation.
+ trailer = current.children[-1]
+ if trailer.children[0] == '.':
+ names.append(trailer.children[1])
+ else:
+ names.append(current)
+ return names
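+
+ # Defined-names sketch: for the assignment target ``a, (b, c)`` the helper
+ # recurses through the 'testlist_star_expr' and the parenthesized 'atom'
+ # to collect ``a``, ``b`` and ``c``; for an attribute target like ``x.y``
+ # the 'power' branch collects the trailer name ``y``.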
+
+
+class ExprStmt(BaseNode, DocstringMixin):
+ type = 'expr_stmt'
+ __slots__ = ()
+
+ def get_defined_names(self):
+ return list(chain.from_iterable(_defined_names(self.children[i])
+ for i in range(0, len(self.children) - 2, 2)
+ if '=' in self.children[i + 1].value))
+
+ def get_rhs(self):
+ """Returns the right-hand-side of the equals."""
+ return self.children[-1]
+
+ def first_operation(self):
+ """
+ Returns `+=`, `=`, etc or None if there is no operation.
+ """
+ try:
+ return self.children[1]
+ except IndexError:
+ return None
+
+
+class Param(BaseNode):
+ """
+ A helper class that makes working with params much easier. The Python
+ grammar defines no ``param`` node; it represents parameters in a way that
+ is not really suited to working with them directly.
+ """
+ type = 'param'
+
+ def __init__(self, children, parent):
+ super(Param, self).__init__(children)
+ self.parent = parent
+ for child in children:
+ child.parent = self
+
+ @property
+ def stars(self):
+ first = self.children[0]
+ if first in ('*', '**'):
+ return len(first.value)
+ return 0
+
+ @property
+ def default(self):
+ try:
+ return self.children[int(self.children[0] in ('*', '**')) + 2]
+ except IndexError:
+ return None
+
+ def annotation(self):
+ # Generate from tfpdef.
+ raise NotImplementedError
+
+ def _tfpdef(self):
+ """
+ tfpdef: see grammar.txt.
+ """
+ offset = int(self.children[0] in ('*', '**'))
+ return self.children[offset]
+
+ @property
+ def name(self):
+ if is_node(self._tfpdef(), 'tfpdef'):
+ return self._tfpdef().children[0]
+ else:
+ return self._tfpdef()
+
+ @property
+ def position_nr(self):
+ return self.parent.children.index(self) - 1
+
+ @property
+ def parent_function(self):
+ return self.get_parent_until(IsScope)
+
+ def __repr__(self):
+ default = '' if self.default is None else '=%s' % self.default
+ return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)
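+
+ # Param sketch: in ``def f(a, b=3, *args, **kwargs): ...`` the ``stars``
+ # property is 0 for ``a`` and ``b``, 1 for ``args`` and 2 for ``kwargs``,
+ # while ``default`` is the ``3`` node for ``b`` and None for the others.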
+
+
+class CompFor(BaseNode):
+ type = 'comp_for'
+ __slots__ = ()
+
+ def is_scope(self):
+ return True
+
+ @property
+ def names_dict(self):
+ dct = {}
+ for name in self.get_defined_names():
+ arr = dct.setdefault(name.value, [])
+ arr.append(name)
+ return dct
+
+ def names_dicts(self, search_global):
+ yield self.names_dict
+
+ def get_defined_names(self):
+ return _defined_names(self.children[1])
+
+# $Id: __init__.py 7648 2013-04-18 07:36:22Z milde $
+ # Authors: David Goodger; Ueli Schlaepfer
+# Copyright: This module has been placed in the public domain.
+
+"""
+This package contains Docutils Reader modules.
+"""
+
+__docformat__ = 'reStructuredText'
+
+import sys
+
+from docutils import utils, parsers, Component
+from docutils.transforms import universal
+if sys.version_info < (2,5):
+ from docutils._compat import __import__
+
+
+class Reader(Component):
+
+ """
+ Abstract base class for docutils Readers.
+
+ Each reader module or package must export a subclass also called 'Reader'.
+
+ The two steps of a Reader's responsibility are `scan()` and
+ `parse()`. Call `read()` to process a document.
+ """
+
+ component_type = 'reader'
+ config_section = 'readers'
+
+ def get_transforms(self):
+ return Component.get_transforms(self) + [
+ universal.Decorations,
+ universal.ExposeInternals,
+ universal.StripComments,]
+
+ def __init__(self, parser=None, parser_name=None):
+ """
+ Initialize the Reader instance.
+
+ Several instance attributes are defined with dummy initial values.
+ Subclasses may use these attributes as they wish.
+ """
+
+ self.parser = parser
+ """A `parsers.Parser` instance shared by all doctrees. May be left
+ unspecified if the document source determines the parser."""
+
+ if parser is None and parser_name:
+ self.set_parser(parser_name)
+
+ self.source = None
+ """`docutils.io` IO object, source of input data."""
+
+ self.input = None
+ """Raw text input; either a single string or, for more complex cases,
+ a collection of strings."""
+
+ def set_parser(self, parser_name):
+ """Set `self.parser` by name."""
+ parser_class = parsers.get_parser_class(parser_name)
+ self.parser = parser_class()
+
+ def read(self, source, parser, settings):
+ self.source = source
+ if not self.parser:
+ self.parser = parser
+ self.settings = settings
+ self.input = self.source.read()
+ self.parse()
+ return self.document
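+
+ # A minimal usage sketch (hedged: readers are normally driven through the
+ # high-level ``docutils.core`` entry points rather than used directly):
+ #
+ # >>> from docutils.core import publish_string
+ # >>> html = publish_string('*hello*', reader_name='standalone',
+ # ...                       writer_name='html')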