def parse(self): """Parse `self.input` into a document tree.""" self.document = document = self.new_document() self.parser.parse(self.input, document) document.current_source = document.current_line = None def new_document(self): """Create and return a new empty document tree (root node).""" document = utils.new_document(self.source.source_path, self.settings) return document class ReReader(Reader): """ A reader which rereads an existing document tree (e.g. a deserializer). Often used in conjunction with `writers.UnfilteredWriter`. """ def get_transforms(self): # Do not add any transforms. They have already been applied # by the reader which originally created the document. return Component.get_transforms(self) _reader_aliases = {} def get_reader_class(reader_name): """Return the Reader class from the `reader_name` module.""" reader_name = reader_name.lower() if reader_name in _reader_aliases: reader_name = _reader_aliases[reader_name] try: module = __import__(reader_name, globals(), locals(), level=1) except ImportError: module = __import__(reader_name, globals(), locals(), level=0) return module.Reader """ Example generation for the scikit learn Generate the rst files for the examples by iterating over the python example files. Files that generate images should start with 'plot' """ from __future__ import division, print_function from time import time import ast import os import re import shutil import traceback import glob import sys import gzip import posixpath import subprocess from textwrap import dedent # Try Python 2 first, otherwise load from Python 3 try: from StringIO import StringIO import cPickle as pickle import urllib2 as urllib from urllib2 import HTTPError, URLError except ImportError: from io import StringIO import pickle import urllib.request import urllib.error import urllib.parse from urllib.error import HTTPError, URLError try: # Python 2 built-in execfile except NameError: def execfile(filename, global_vars=None, local_vars=None): with open(filename) as f: code = compile(f.read(), filename, 'exec') exec(code, global_vars, local_vars) try: basestring except NameError: basestring = str import token import tokenize import numpy as np try: # make sure that the Agg backend is set before importing any # matplotlib import matplotlib matplotlib.use('Agg') except ImportError: # this script can be imported by nosetest to find tests to run: we should not # impose the matplotlib requirement in that case. 
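# (matplotlib only honours matplotlib.use('Agg') if it runs before the first
# ``import matplotlib.pyplot`` anywhere in the process, so the backend has
# to be forced here at import time rather than inside the plotting code.)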
pass from sklearn.externals import joblib ############################################################################### # A tee object to redict streams to multiple outputs class Tee(object): def __init__(self, file1, file2): self.file1 = file1 self.file2 = file2 def write(self, data): self.file1.write(data) self.file2.write(data) def flush(self): self.file1.flush() self.file2.flush() ############################################################################### # Documentation link resolver objects def _get_data(url): """Helper function to get data over http or from a local file""" if url.startswith('http://'): # Try Python 2, use Python 3 on exception try: resp = urllib.urlopen(url) encoding = resp.headers.dict.get('content-encoding', 'plain') except AttributeError: resp = urllib.request.urlopen(url) encoding = resp.headers.get('content-encoding', 'plain') data = resp.read() if encoding == 'plain': pass elif encoding == 'gzip': data = StringIO(data) data = gzip.GzipFile(fileobj=data).read() else: raise RuntimeError('unknown encoding') else: with open(url, 'r') as fid: data = fid.read() fid.close() return data mem = joblib.Memory(cachedir='_build') get_data = mem.cache(_get_data) def parse_sphinx_searchindex(searchindex): """Parse a Sphinx search index Parameters ---------- searchindex : str The Sphinx search index (contents of searchindex.js) Returns ------- filenames : list of str The file names parsed from the search index. objects : dict The objects parsed from the search index. """ def _select_block(str_in, start_tag, end_tag): """Select first block delimited by start_tag and end_tag""" start_pos = str_in.find(start_tag) if start_pos < 0: raise ValueError('start_tag not found') depth = 0 for pos in range(start_pos, len(str_in)): if str_in[pos] == start_tag: depth += 1 elif str_in[pos] == end_tag: depth -= 1 if depth == 0: break sel = str_in[start_pos + 1:pos] return sel def _parse_dict_recursive(dict_str): """Parse a dictionary from the search index""" dict_out = dict() pos_last = 0 pos = dict_str.find(':') while pos >= 0: key = dict_str[pos_last:pos] if dict_str[pos + 1] == '[': # value is a list pos_tmp = dict_str.find(']', pos + 1) if pos_tmp < 0: raise RuntimeError('error when parsing dict') value = dict_str[pos + 2: pos_tmp].split(',') # try to convert elements to int for i in range(len(value)): try: value[i] = int(value[i]) except ValueError: pass elif dict_str[pos + 1] == '{': # value is another dictionary subdict_str = _select_block(dict_str[pos:], '{', '}') value = _parse_dict_recursive(subdict_str) pos_tmp = pos + len(subdict_str) else: raise ValueError('error when parsing dict: unknown elem') key = key.strip('"') if len(key) > 0: dict_out[key] = value pos_last = dict_str.find(',', pos_tmp) if pos_last < 0: break pos_last += 1 pos = dict_str.find(':', pos_last) return dict_out # Make sure searchindex uses UTF-8 encoding if hasattr(searchindex, 'decode'): searchindex = searchindex.decode('UTF-8') # parse objects query = 'objects:' pos = searchindex.find(query) if pos < 0: raise ValueError('"objects:" not found in search index') sel = _select_block(searchindex[pos:], '{', '}') objects = _parse_dict_recursive(sel) # parse filenames query = 'filenames:' pos = searchindex.find(query) if pos < 0: raise ValueError('"filenames:" not found in search index') filenames = searchindex[pos + len(query) + 1:] filenames = filenames[:filenames.find(']')] filenames = [f.strip('"') for f in filenames.split(',')] return filenames, objects class SphinxDocLinkResolver(object): """ Resolve 
documentation links using searchindex.js generated by Sphinx Parameters ---------- doc_url : str The base URL of the project website. searchindex : str Filename of searchindex, relative to doc_url. extra_modules_test : list of str List of extra module names to test. relative : bool Return relative links (only useful for links to documentation of this package). """ def __init__(self, doc_url, searchindex='searchindex.js', extra_modules_test=None, relative=False): self.doc_url = doc_url self.relative = relative self._link_cache = {} self.extra_modules_test = extra_modules_test self._page_cache = {} if doc_url.startswith('http://'): if relative: raise ValueError('Relative links are only supported for local ' 'URLs (doc_url cannot start with "http://)"') searchindex_url = doc_url + '/' + searchindex else: searchindex_url = os.path.join(doc_url, searchindex) # detect if we are using relative links on a Windows system if os.name.lower() == 'nt' and not doc_url.startswith('http://'): if not relative: raise ValueError('You have to use relative=True for the local' ' package on a Windows system.') self._is_windows = True else: self._is_windows = False # download and initialize the search index sindex = get_data(searchindex_url) filenames, objects = parse_sphinx_searchindex(sindex) self._searchindex = dict(filenames=filenames, objects=objects) def _get_link(self, cobj): """Get a valid link, False if not found""" fname_idx = None full_name = cobj['module_short'] + '.' + cobj['name'] if full_name in self._searchindex['objects']: value = self._searchindex['objects'][full_name] if isinstance(value, dict): value = value[next(iter(value.keys()))] fname_idx = value[0] elif cobj['module_short'] in self._searchindex['objects']: value = self._searchindex['objects'][cobj['module_short']] if cobj['name'] in value.keys(): fname_idx = value[cobj['name']][0] if fname_idx is not None: fname = self._searchindex['filenames'][fname_idx] + '.html' if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if hasattr(link, 'decode'): link = link.decode('utf-8', 'replace') if link in self._page_cache: html = self._page_cache[link] else: html = get_data(link) self._page_cache[link] = html # test if cobj appears in page comb_names = [cobj['module_short'] + '.' + cobj['name']] if self.extra_modules_test is not None: for mod in self.extra_modules_test: comb_names.append(mod + '.' + cobj['name']) url = False if hasattr(html, 'decode'): # Decode bytes under Python 3 html = html.decode('utf-8', 'replace') for comb_name in comb_names: if hasattr(comb_name, 'decode'): # Decode bytes under Python 3 comb_name = comb_name.decode('utf-8', 'replace') if comb_name in html: url = link + u'#' + comb_name link = url else: link = False return link def resolve(self, cobj, this_url): """Resolve the link to the documentation, returns None if not found Parameters ---------- cobj : dict Dict with information about the "code object" for which we are resolving a link. cobi['name'] : function or class name (str) cobj['module_short'] : shortened module name (str) cobj['module'] : module name (str) this_url: str URL of the current page. Needed to construct relative URLs (only used if relative=True in constructor). Returns ------- link : str | None The link (URL) to the documentation. """ full_name = cobj['module_short'] + '.' 
+ cobj['name'] link = self._link_cache.get(full_name, None) if link is None: # we don't have it cached link = self._get_link(cobj) # cache it for the future self._link_cache[full_name] = link if link is False or link is None: # failed to resolve return None if self.relative: link = os.path.relpath(link, start=this_url) if self._is_windows: # replace '\' with '/' so it on the web link = link.replace('\\', '/') # for some reason, the relative link goes one directory too high up link = link[3:] return link ############################################################################### rst_template = """ .. _example_%(short_fname)s: %(docstring)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- """ plot_rst_template = """ .. _example_%(short_fname)s: %(docstring)s %(image_list)s %(stdout)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- **Total running time of the example:** %(time_elapsed) .2f seconds (%(time_m) .0f minutes %(time_s) .2f seconds) """ # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal # lists. HLIST_HEADER = """ .. rst-class:: horizontal """ HLIST_IMAGE_TEMPLATE = """ * .. image:: images/%s :scale: 47 """ SINGLE_IMAGE = """ .. image:: images/%s :align: center """ # The following dictionary contains the information used to create the # thumbnails for the front page of the scikit-learn home page. # key: first image in set # values: (number of plot in set, height of thumbnail) carousel_thumbs = {'plot_single_localization_001.png': (1, 250), 'plot_multiple_localization_001.png': (1, 250), 'plot_overfeat_layer1_filters_001.png': (1, 250), 'plot_mnist_generator_001.png': (1, 250), 'plot_asirra_dataset_001.png': (1, 250), } def extract_docstring(filename, ignore_heading=False): """ Extract a module-level docstring, if any """ lines = open(filename).readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 docstring = '' first_par = '' line_iterator = iter(lines) tokens = tokenize.generate_tokens(lambda: next(line_iterator)) for tok_type, tok_content, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif tok_type == 'STRING': docstring = eval(tok_content) # If the docstring is formatted with several paragraphs, extract # the first one: paragraphs = '\n'.join( line.rstrip() for line in docstring.split('\n')).split('\n\n') if paragraphs: if ignore_heading: if len(paragraphs) > 1: first_par = re.sub('\n', ' ', paragraphs[1]) first_par = ((first_par[:95] + '...') if len(first_par) > 95 else first_par) else: raise ValueError("Docstring not found by gallery", "Please check your example's layout", " and make sure it's correct") else: first_par = paragraphs[0] break return docstring, first_par, erow + 1 + start_row def generate_example_rst(app): """ Generate the list of examples, as well as the contents of examples. 
""" root_dir = os.path.join(app.builder.srcdir, 'auto_examples') example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..', 'examples')) generated_dir = os.path.abspath(os.path.join(app.builder.srcdir, 'modules', 'generated')) try: plot_gallery = eval(app.builder.config.plot_gallery) except TypeError: plot_gallery = bool(app.builder.config.plot_gallery) if not os.path.exists(example_dir): os.makedirs(example_dir) if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(generated_dir): os.makedirs(generated_dir) # we create an index.rst with all examples fhindex = open(os.path.join(root_dir, 'index.rst'), 'w') # Note: The sidebar button has been removed from the examples page for now # due to how it messes up the layout. Will be fixed at a later point fhindex.write("""\ .. raw:: html Examples ======== .. _examples-index: """) # Here we don't use an os.walk, but we recurse only twice: flat is # better than nested. seen_backrefs = set() generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs) for dir in sorted(os.listdir(example_dir)): if os.path.isdir(os.path.join(example_dir, dir)): generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs) fhindex.flush() def extract_line_count(filename, target_dir): # Extract the line count of a file example_file = os.path.join(target_dir, filename) lines = open(example_file).readlines() start_row = 0 if lines and lines[0].startswith('#!'): lines.pop(0) start_row = 1 line_iterator = iter(lines) tokens = tokenize.generate_tokens(lambda: next(line_iterator)) check_docstring = True erow_docstring = 0 for tok_type, _, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif ((tok_type == 'STRING') and check_docstring): erow_docstring = erow check_docstring = False return erow_docstring+1+start_row, erow+1+start_row def line_count_sort(file_list, target_dir): # Sort the list of examples by line-count new_list = [x for x in file_list if x.endswith('.py')] unsorted = np.zeros(shape=(len(new_list), 2)) unsorted = unsorted.astype(np.object) for count, exmpl in enumerate(new_list): docstr_lines, total_lines = extract_line_count(exmpl, target_dir) unsorted[count][1] = total_lines - docstr_lines unsorted[count][0] = exmpl index = np.lexsort((unsorted[:, 0].astype(np.str), unsorted[:, 1].astype(np.float))) if not len(unsorted): return [] return np.array(unsorted[index][:, 0]).tolist() def _thumbnail_div(subdir, full_dir, fname, snippet): """Generates RST to place a thumbnail in a gallery""" thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png') link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_') ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_') if ref_name.startswith('._'): ref_name = ref_name[2:] out = [] out.append(""" .. raw:: html
""") out.append('.. figure:: %s\n' % thumb) if link_name.startswith('._'): link_name = link_name[2:] if full_dir != '.': out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3])) else: out.append(' :target: ./%s.html\n\n' % link_name[:-3]) out.append(""" :ref:`example_%s` .. raw:: html

        %s

""" % (ref_name, snippet)) return ''.join(out) def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs): """ Generate the rst file for an example directory. """ if not dir == '.': target_dir = os.path.join(root_dir, dir) src_dir = os.path.join(example_dir, dir) else: target_dir = root_dir src_dir = example_dir if not os.path.exists(os.path.join(src_dir, 'README.txt')): raise ValueError('Example directory %s does not have a README.txt' % src_dir) fhindex.write(""" %s """ % open(os.path.join(src_dir, 'README.txt')).read()) if not os.path.exists(target_dir): os.makedirs(target_dir) sorted_listdir = line_count_sort(os.listdir(src_dir), src_dir) if not os.path.exists(os.path.join(dir, 'images', 'thumb')): os.makedirs(os.path.join(dir, 'images', 'thumb')) for fname in sorted_listdir: if fname.endswith('py'): backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery) new_fname = os.path.join(src_dir, fname) _, snippet, _ = extract_docstring(new_fname, True) fhindex.write(_thumbnail_div(dir, dir, fname, snippet)) fhindex.write(""" .. toctree:: :hidden: %s/%s """ % (dir, fname[:-3])) for backref in backrefs: include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref) seen = backref in seen_backrefs with open(include_path, 'a' if seen else 'w') as ex_file: if not seen: # heading print(file=ex_file) print('Examples using ``%s``' % backref, file=ex_file) print('-----------------%s--' % ('-' * len(backref)), file=ex_file) print(file=ex_file) rel_dir = os.path.join('../../auto_examples', dir) ex_file.write(_thumbnail_div(dir, rel_dir, fname, snippet)) seen_backrefs.add(backref) fhindex.write(""" .. raw:: html
""") # clear at the end of the section # modules for which we embed links into example code DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy'] def make_thumbnail(in_fname, out_fname, width, height): """Make a thumbnail with the same aspect ratio centered in an image with a given width and height """ # local import to avoid testing dependency on PIL: try: from PIL import Image except ImportError: import Image img = Image.open(in_fname) width_in, height_in = img.size scale_w = width / float(width_in) scale_h = height / float(height_in) if height_in * scale_w <= height: scale = scale_w else: scale = scale_h width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) # resize the image img.thumbnail((width_sc, height_sc), Image.ANTIALIAS) # insert centered thumb = Image.new('RGB', (width, height), (255, 255, 255)) pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2) thumb.paste(img, pos_insert) thumb.save(out_fname) # Use optipng to perform lossless compression on the resized image if # software is installed if os.environ.get('SKLEARN_DOC_OPTIPNG', False): try: subprocess.call(["optipng", "-quiet", "-o", "9", out_fname]) except Exception: warnings.warn('Install optipng to reduce the size of the generated images') def get_short_module_name(module_name, obj_name): """ Get the shortest possible module name """ parts = module_name.split('.') short_name = module_name for i in range(len(parts) - 1, 0, -1): short_name = '.'.join(parts[:i]) try: exec('from %s import %s' % (short_name, obj_name)) except ImportError: # get the last working module name short_name = '.'.join(parts[:(i + 1)]) break return short_name class NameFinder(ast.NodeVisitor): """Finds the longest form of variable names and their imports in code Only retains names from imported modules. """ def __init__(self): super(NameFinder, self).__init__() self.imported_names = {} self.accessed_names = set() def visit_Import(self, node, prefix=''): for alias in node.names: local_name = alias.asname or alias.name self.imported_names[local_name] = prefix + alias.name def visit_ImportFrom(self, node): self.visit_Import(node, node.module + '.') def visit_Name(self, node): self.accessed_names.add(node.id) def visit_Attribute(self, node): attrs = [] while isinstance(node, ast.Attribute): attrs.append(node.attr) node = node.value if isinstance(node, ast.Name): # This is a.b, not e.g. a().b attrs.append(node.id) self.accessed_names.add('.'.join(reversed(attrs))) else: # need to get a in a().b self.visit(node) def get_mapping(self): for name in self.accessed_names: local_name = name.split('.', 1)[0] remainder = name[len(local_name):] if local_name in self.imported_names: # Join import path to relative path full_name = self.imported_names[local_name] + remainder yield name, full_name def identify_names(code): """Builds a codeobj summary by identifying and resovles used names >>> code = ''' ... from a.b import c ... import d as e ... print(c) ... e.HelloWorld().f.g ... ''' >>> for name, o in sorted(identify_names(code).items()): ... print(name, o['name'], o['module'], o['module_short']) c c a.b a.b e.HelloWorld HelloWorld d d """ finder = NameFinder() finder.visit(ast.parse(code)) example_code_obj = {} for name, full_name in finder.get_mapping(): # name is as written in file (e.g. np.asarray) # full_name includes resolved import path (e.g. 
numpy.asarray) module, attribute = full_name.rsplit('.', 1) # get shortened module name module_short = get_short_module_name(module, attribute) cobj = {'name': attribute, 'module': module, 'module_short': module_short} example_code_obj[name] = cobj return example_code_obj def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery): """ Generate the rst file for a given example. Returns the set of sklearn functions/classes imported in the example. """ base_image_name = os.path.splitext(fname)[0] image_fname = '%s_%%03d.png' % base_image_name this_template = rst_template last_dir = os.path.split(src_dir)[-1] # to avoid leading . in file names, and wrong names in links if last_dir == '.' or last_dir == 'examples': last_dir = '' else: last_dir += '_' short_fname = last_dir + fname src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) # The following is a list containing all the figure names figure_list = [] image_dir = os.path.join(target_dir, 'images') thumb_dir = os.path.join(image_dir, 'thumb') if not os.path.exists(image_dir): os.makedirs(image_dir) if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) image_path = os.path.join(image_dir, image_fname) stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name) time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name) thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png') time_elapsed = 0 time_m = 0 time_s = 0 if plot_gallery and fname.startswith('plot'): # generate the plot as png image if file name # starts with plot and if it is more recent than an # existing image. first_image_file = image_path % 1 if os.path.exists(stdout_path): stdout = open(stdout_path).read() else: stdout = '' if os.path.exists(time_path): time_elapsed = float(open(time_path).read()) if not os.path.exists(first_image_file) or \ os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime: # We need to execute the code print('plotting %s' % fname) t0 = time() import matplotlib.pyplot as plt plt.close('all') cwd = os.getcwd() try: # First CD in the original example dir, so that any file # created by the example get created in this directory orig_stdout = sys.stdout os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout my_globals = {'pl': plt} execfile(os.path.basename(src_file), my_globals) time_elapsed = time() - t0 sys.stdout = orig_stdout my_stdout = my_buffer.getvalue() if '__doc__' in my_globals: # The __doc__ is often printed in the example, we # don't with to echo it my_stdout = my_stdout.replace( my_globals['__doc__'], '') my_stdout = my_stdout.strip() if my_stdout: stdout = '**Script output**::\n\n %s\n\n' % ( '\n '.join(my_stdout.split('\n'))) open(stdout_path, 'w').write(stdout) open(time_path, 'w').write('%f' % time_elapsed) os.chdir(cwd) # In order to save every figure we have two solutions : # * iterate from 1 to infinity and call plt.fignum_exists(n) # (this requires the figures to be numbered # incrementally: 1, 2, 3 and not 1, 2, 5) # * iterate over [fig_mngr.num for fig_mngr in # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers() for fig_mngr in fig_managers: # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. 
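                    # A rough sketch of the first alternative mentioned
                    # above, assuming gap-free figure numbering (exactly
                    # what cannot be guaranteed here):
                    #
                    #     n = 1
                    #     while plt.fignum_exists(n):
                    #         plt.figure(n)
                    #         plt.savefig(image_path % n)
                    #         n += 1
                    #
                    # Iterating over the Gcf managers avoids that assumption.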
                    plt.figure(fig_mngr.num)
                    plt.savefig(image_path % fig_mngr.num)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                              '[0-9][0-9][0-9]'))]
        figure_list.sort()

        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0],
                                      '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path
        # contained in `thumb_file`, which is within
        # `auto_examples/../images/thumbs`, depending on the example.
        # Because the carousel has different dimensions from those of the
        # examples gallery, the gallery thumbnails are not simply reused:
        # some contain whitespace due to the default gallery thumbnail size.
        # Below, for a few cases, separate thumbnails are created (the
        # originals can't just be overwritten with the carousel dimensions,
        # as that messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to
        # _build/html/dev/_images/ because, for a reason unknown to me,
        # Sphinx refuses to copy the 'extra' thumbnails from the auto
        # examples gallery to the _build folder. This works fine as is, but
        # it would be cleaner to have it happen with the rest. Ideally they
        # should be written to 'thumb_file' as well, and then copied to the
        # _images folder during the `Copying Downloadable Files` step like
        # the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path,
                                          fname[:-3] + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile,
                               carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
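    # (Illustration with a hypothetical file name: for
    # figure_list == ['/plot_foo_001.png'] the SINGLE_IMAGE template expands
    # to
    #
    #     .. image:: images/plot_foo_001.png
    #         :align: center
    #
    # whereas several figures are each wrapped in HLIST_IMAGE_TEMPLATE under
    # HLIST_HEADER so the CSS can lay them out horizontally.)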
if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') else: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') time_m, time_s = divmod(time_elapsed, 60) f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w') f.write(this_template % locals()) f.flush() # save variables so we can later add links to the documentation example_code_obj = identify_names(open(example_file).read()) if example_code_obj: codeobj_fname = example_file[:-3] + '_codeobj.pickle' with open(codeobj_fname, 'wb') as fid: pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL) backrefs = set('{module_short}.{name}'.format(**entry) for entry in example_code_obj.values() if entry['module'].startswith('sklearn')) return backrefs def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" try: if exception is not None: return print('Embedding documentation hyperlinks in examples..') # Add resolvers for the packages for which we want to show links doc_resolvers = {} doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir, relative=True) doc_resolvers['matplotlib'] = SphinxDocLinkResolver( 'http://matplotlib.org') doc_resolvers['numpy'] = SphinxDocLinkResolver( 'http://docs.scipy.org/doc/numpy-1.6.0') doc_resolvers['scipy'] = SphinxDocLinkResolver( 'http://docs.scipy.org/doc/scipy-0.11.0/reference') example_dir = os.path.join(app.builder.srcdir, 'auto_examples') html_example_dir = os.path.abspath(os.path.join(app.builder.outdir, 'auto_examples')) # patterns for replacement link_pattern = '%s' orig_pattern = '%s' period = '.' for dirpath, _, filenames in os.walk(html_example_dir): for fname in filenames: print('\tprocessing: %s' % fname) full_fname = os.path.join(html_example_dir, dirpath, fname) subpath = dirpath[len(html_example_dir) + 1:] pickle_fname = os.path.join(example_dir, subpath, fname[:-5] + '_codeobj.pickle') if os.path.exists(pickle_fname): # we have a pickle file with the objects to embed links for with open(pickle_fname, 'rb') as fid: example_code_obj = pickle.load(fid) fid.close() str_repl = {} # generate replacement strings with the links for name, cobj in example_code_obj.items(): this_module = cobj['module'].split('.')[0] if this_module not in doc_resolvers: continue link = doc_resolvers[this_module].resolve(cobj, full_fname) if link is not None: parts = name.split('.') name_html = period.join(orig_pattern % part for part in parts) str_repl[name_html] = link_pattern % (link, name_html) # do the replacement in the html file # ensure greediness names = sorted(str_repl, key=len, reverse=True) expr = re.compile(r'(? 
0: with open(full_fname, 'rb') as fid: lines_in = fid.readlines() with open(full_fname, 'wb') as fid: for line in lines_in: line = line.decode('utf-8') line = expr.sub(substitute_link, line) fid.write(line.encode('utf-8')) except HTTPError as e: print("The following HTTP Error has occurred:\n") print(e.code) except URLError as e: print("\n...\n" "Warning: Embedding the documentation hyperlinks requires " "internet access.\nPlease check your network connection.\n" "Unable to continue embedding due to a URL Error: \n") print(e.args) print('[done]') def setup(app): app.connect('builder-inited', generate_example_rst) app.add_config_value('plot_gallery', True, 'html') # embed links after build is finished app.connect('build-finished', embed_code_links) # Sphinx hack: sphinx copies generated images to the build directory # each time the docs are made. If the desired image name already # exists, it appends a digit to prevent overwrites. The problem is, # the directory is never cleared. This means that each time you build # the docs, the number of images in the directory grows. # # This question has been asked on the sphinx development list, but there # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html # # The following is a hack that prevents this behavior by clearing the # image build directory each time the docs are built. If sphinx # changes their layout between versions, this will not work (though # it should probably not cause a crash). Tested successfully # on Sphinx 1.0.7 build_image_dir = '_build/html/_images' if os.path.exists(build_image_dir): filelist = os.listdir(build_image_dir) for filename in filelist: if filename.endswith('png'): os.remove(os.path.join(build_image_dir, filename)) def setup_module(): # HACK: Stop nosetests running setup() above pass # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('contenttypes', '0001_initial'), ] operations = [ migrations.CreateModel( name='DisplayField', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('path', models.CharField(max_length=2000, blank=True)), ('path_verbose', models.CharField(max_length=2000, blank=True)), ('field', models.CharField(max_length=2000)), ('field_verbose', models.CharField(max_length=2000)), ('name', models.CharField(max_length=2000)), ('sort', models.IntegerField(null=True, blank=True)), ('sort_reverse', models.BooleanField(default=False, verbose_name=b'Reverse')), ('width', models.IntegerField(default=15)), ('aggregate', models.CharField(blank=True, max_length=5, choices=[(b'Sum', b'Sum'), (b'Count', b'Count'), (b'Avg', b'Avg'), (b'Max', b'Max'), (b'Min', b'Min')])), ('position', models.PositiveSmallIntegerField(null=True, blank=True)), ('total', models.BooleanField(default=False)), ('group', models.BooleanField(default=False)), ], options={ 'ordering': ['position'], }, bases=(models.Model,), ), migrations.CreateModel( name='FilterField', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('path', models.CharField(max_length=2000, blank=True)), ('path_verbose', models.CharField(max_length=2000, blank=True)), ('field', models.CharField(max_length=2000)), ('field_verbose', models.CharField(max_length=2000)), ('filter_type', models.CharField(default=b'icontains', max_length=20, 
blank=True, choices=[(b'exact', b'Equals'), (b'iexact', b'Equals (case-insensitive)'), (b'contains', b'Contains'), (b'icontains', b'Contains (case-insensitive)'), (b'in', b'in (comma seperated 1,2,3)'), (b'gt', b'Greater than'), (b'gte', b'Greater than equals'), (b'lt', b'Less than'), (b'lte', b'Less than equals'), (b'startswith', b'Starts with'), (b'istartswith', b'Starts with (case-insensitive)'), (b'endswith', b'Ends with'), (b'iendswith', b'Ends with (case-insensitive)'), (b'range', b'range'), (b'week_day', b'Week day'), (b'isnull', b'Is null'), (b'regex', b'Regular Expression'), (b'iregex', b'Reg. Exp. (case-insensitive)')])), ('filter_value', models.CharField(max_length=2000)), ('filter_value2', models.CharField(max_length=2000, blank=True)), ('exclude', models.BooleanField(default=False)), ('position', models.PositiveSmallIntegerField(null=True, blank=True)), ], options={ 'ordering': ['position'], }, bases=(models.Model,), ), migrations.CreateModel( name='Format', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(default=b'', max_length=50, blank=True)), ('string', models.CharField(default=b'', help_text=b'Python string format. Ex ${} would place a $ in front of the result.', max_length=300, blank=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='Report', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ('slug', models.SlugField(verbose_name=b'Short Name')), ('description', models.TextField(blank=True)), ('created', models.DateField(auto_now_add=True)), ('modified', models.DateField(auto_now=True)), ('distinct', models.BooleanField(default=False)), ('report_file', models.FileField(upload_to=b'report_files', blank=True)), ('report_file_creation', models.DateTimeField(null=True, blank=True)), ('root_model', models.ForeignKey(to='contenttypes.ContentType')), ('starred', models.ManyToManyField(help_text=b'These users have starred this report for easy reference.', related_name='report_starred_set', to=settings.AUTH_USER_MODEL, blank=True)), ('user_created', models.ForeignKey(blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)), ('user_modified', models.ForeignKey(related_name='report_modified_set', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)), ], options={ }, bases=(models.Model,), ), migrations.AddField( model_name='filterfield', name='report', field=models.ForeignKey(to='report_builder.Report'), preserve_default=True, ), migrations.AddField( model_name='displayfield', name='display_format', field=models.ForeignKey(blank=True, to='report_builder.Format', null=True), preserve_default=True, ), migrations.AddField( model_name='displayfield', name='report', field=models.ForeignKey(to='report_builder.Report'), preserve_default=True, ), ] """OpenGL-wide constant types (not OpenGL.GL-specific) These are basically the fundamental data-types that OpenGL uses (note, doesn't include the OpenGL-ES types!) 
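Each constant is wrapped in OpenGL.constant.Constant, which behaves like
the underlying numeric value but also remembers its symbolic name.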
""" import ctypes from OpenGL.constant import Constant GL_FALSE = Constant( 'GL_FALSE', 0x0 ) GL_TRUE = Constant( 'GL_TRUE', 0x1 ) GL_BYTE = Constant( 'GL_BYTE', 0x1400 ) GL_UNSIGNED_BYTE = Constant( 'GL_UNSIGNED_BYTE', 0x1401 ) GL_SHORT = Constant( 'GL_SHORT', 0x1402 ) GL_UNSIGNED_SHORT = Constant( 'GL_UNSIGNED_SHORT', 0x1403 ) GL_INT = Constant( 'GL_INT', 0x1404 ) GL_UNSIGNED_INT = Constant( 'GL_UNSIGNED_INT', 0x1405 ) GL_UNSIGNED_INT64 = Constant( 'GL_UNSIGNED_INT64_AMD', 0x8BC2 ) GL_FLOAT = Constant( 'GL_FLOAT', 0x1406 ) GL_DOUBLE = Constant( 'GL_DOUBLE', 0x140a ) GL_CHAR = str GL_HALF_NV = Constant( 'GL_HALF_NV', 0x1401 ) ctypes_version = [int(i) for i in ctypes.__version__.split('.')[:3]] # Basic OpenGL data-types as ctypes declarations... def _defineType( name, baseType, convertFunc = long ): import OpenGL do_wrapping = ( OpenGL.ALLOW_NUMPY_SCALARS or # explicitly require (( # or we are using Python 2.5.x ctypes which doesn't support uint type numpy scalars ctypes_version < [1,1,0] and baseType in (ctypes.c_uint,ctypes.c_uint64,ctypes.c_ulong,ctypes.c_ushort) ) or ( # or we are using Python 2.5.x (x < 2) ctypes which doesn't support any numpy int scalars ctypes_version < [1,0,2] and baseType in (ctypes.c_int,ctypes.c_int64,ctypes.c_long,ctypes.c_short) )) ) if do_wrapping: original = baseType.from_param if not getattr( original, 'from_param_numpy_scalar', False ): def from_param( x, typeCode=None ): try: return original( x ) except TypeError, err: try: return original( convertFunc(x) ) except TypeError, err2: raise err from_param = staticmethod( from_param ) setattr( baseType, 'from_param', from_param ) baseType.from_param_numpy_scalar = True return baseType else: return baseType GLvoid = None GLboolean = _defineType( 'GLboolean', ctypes.c_ubyte, bool ) GLenum = _defineType( 'GLenum', ctypes.c_uint ) GLfloat = _defineType( 'GLfloat', ctypes.c_float, float ) GLfloat_2 = GLfloat * 2 GLfloat_3 = GLfloat * 3 GLfloat_4 = GLfloat * 4 GLdouble = _defineType( 'GLdouble', ctypes.c_double, float ) GLdouble_2 = GLdouble * 2 GLdouble_3 = GLdouble * 3 GLdouble_4 = GLdouble * 4 GLbyte = ctypes.c_byte GLshort = _defineType( 'GLshort', ctypes.c_short, int ) GLint = _defineType( 'GLint', ctypes.c_int, int ) GLuint = _defineType( 'GLuint', ctypes.c_uint, long ) GLsizei = _defineType( 'GLsizei', ctypes.c_int, int ) GLubyte = ctypes.c_ubyte GLubyte_3 = GLubyte * 3 GLushort = _defineType( 'GLushort', ctypes.c_ushort, int ) GLhandleARB = _defineType( 'GLhandleARB', ctypes.c_uint, long ) GLhandle = _defineType( 'GLhandle', ctypes.c_uint, long ) GLchar = GLcharARB = ctypes.c_char GLbitfield = _defineType( 'GLbitfield', ctypes.c_uint, long ) GLclampd = _defineType( 'GLclampd', ctypes.c_double, float ) GLclampf = _defineType( 'GLclampf', ctypes.c_float, float ) GLuint64 = GLuint64EXT = _defineType('GLuint64', ctypes.c_uint64, long ) GLint64 = GLint64EXT = _defineType('GLint64', ctypes.c_int64, long ) # ptrdiff_t, actually... GLsizeiptrARB = GLsizeiptr = GLsizei GLintptrARB = GLintptr = GLint size_t = ctypes.c_ulong void = None GLhalfNV = GLhalfARB = ctypes.c_ushort # GL.ARB.sync extension, GLsync is an opaque pointer to a struct # in the extensions header, basically just a "token" that can be # passed to the various operations... 
GLsync = ctypes.c_void_p ARRAY_TYPE_TO_CONSTANT = [ ('GLclampd', GL_DOUBLE), ('GLclampf', GL_FLOAT), ('GLfloat', GL_FLOAT), ('GLdouble', GL_DOUBLE), ('GLbyte', GL_BYTE), ('GLshort', GL_SHORT), ('GLint', GL_INT), ('GLubyte', GL_UNSIGNED_BYTE), ('GLushort', GL_UNSIGNED_SHORT), ('GLuint', GL_UNSIGNED_INT), ('GLenum', GL_UNSIGNED_INT), ] # $Id$ # # Copyright (C) 2003-2006 Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """unit testing code for graph-theoretical descriptors """ from __future__ import print_function from rdkit import RDConfig import unittest,os.path from rdkit import Chem from rdkit.Chem import GraphDescriptors,MolSurf,Lipinski,Crippen def feq(n1,n2,tol=1e-4): return abs(n1-n2)<=tol class TestCase(unittest.TestCase): def setUp(self): if doLong: print('\n%s: '%self.shortDescription(),end='') def testBertzCTShort(self): """ test calculation of Bertz 'C(T)' index """ data = [('C=CC=C',21.01955), ('O=CC=O',25.01955), ('FCC(=O)CF',46.7548875), ('O=C1C=CC(=O)C=C1',148.705216), ('C12C(F)=C(O)C(F)C1C(F)=C(O)C(F)2',315.250442), ('C12CC=CCC1C(=O)C3CC=CCC3C(=O)2',321.539522)] for smi,CT in data: m = Chem.MolFromSmiles(smi) newCT = GraphDescriptors.BertzCT(m, forceDMat = 1) assert feq(newCT,CT,1e-3),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT) def _testBertzCTLong(self): """ test calculation of Bertz 'C(T)' index NOTE: this is a backwards compatibility test, because of the changes w.r.t. the treatment of aromatic atoms in the new version, we need to ignore molecules with aromatic rings... """ col = 1 with open(os.path.join(RDConfig.RDCodeDir,'Chem','test_data','PP_descrs_regress.2.csv'),'r') as inF: lineNum=0 for line in inF: lineNum+=1 if line[0] != '#': splitL = line.split(',') smi = splitL[0] m = Chem.MolFromSmiles(smi) assert m,'line %d, smiles: %s'%(lineNum,smi) useIt=1 for atom in m.GetAtoms(): if atom.GetIsAromatic(): useIt=0 break if useIt: tgtVal = float(splitL[col]) try: val = GraphDescriptors.BertzCT(m) except Exception: val = 666 assert feq(val,tgtVal,1e-4),'line %d, mol %s (CT calc = %f) should have CT = %f'%(lineNum,smi,val,tgtVal) def __testDesc(self,fileN,col,func): with open(os.path.join(RDConfig.RDCodeDir,'Chem','test_data',fileN),'r') as inF: lineNum=0 for line in inF: lineNum+=1 if line[0] != '#': splitL = line.split(',') smi = splitL[0] m = Chem.MolFromSmiles(smi) assert m,'line %d, smiles: %s'%(lineNum,smi) useIt=1 if useIt: tgtVal = float(splitL[col]) if not feq(tgtVal,666.0): try: val = func(m) except Exception: val = 666 assert feq(val,tgtVal,1e-4),'line %d, mol %s (calc = %f) should have val = %f'%(lineNum,smi,val,tgtVal) def testChi0Long(self): """ test calculation of Chi0 """ col = 2 self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.Chi0) def _testChi0Long2(self): """ test calculation of Chi0 """ col = 2 self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.Chi0) def testHallKierAlphaLong(self): """ test calculation of the Hall-Kier Alpha value """ col = 3 self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.HallKierAlpha) def _testHallKierAlphaLong2(self): """ test calculation of the Hall-Kier Alpha value """ col = 3 self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.HallKierAlpha) def testIpc(self): """ test calculation of Ipc. 
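        (Ipc is the information content of the coefficients of the
        characteristic polynomial of the adjacency matrix of the
        hydrogen-suppressed graph; passing avg=1 asks for the average
        rather than the total value.)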
""" data = [('CCCCC',1.40564,11.24511),('CCC(C)C',1.37878, 9.65148),('CC(C)(C)C',0.72193,3.60964),('CN(CC)CCC',1.67982,31.91664),('C1CCCCC1',1.71997,34.39946),('CC1CCCCC1',1.68562,47.19725),('Cc1ccccc1',1.68562,47.19725),('CC(C)=C(C)C',1.36096,13.60964),('C#N',1.00000,2.00000),('OC#N',0.91830,2.75489)] for smi,res1,res2 in data: m = Chem.MolFromSmiles(smi) Ipc = GraphDescriptors.Ipc(m, forceDMat=1) Ipc_avg = GraphDescriptors.Ipc(m,avg = 1, forceDMat=1) assert feq(Ipc_avg,res1,1e-3),'mol %s (Ipc_avg=%f) should have Ipc_avg=%f'%(smi,Ipc_avg,res1) assert feq(Ipc,res2,1e-3),'mol %s (Ipc=%f) should have Ipc=%f'%(smi,Ipc,res2) Ipc = GraphDescriptors.Ipc(m) Ipc_avg = GraphDescriptors.Ipc(m,avg = 1) assert feq(Ipc_avg,res1,1e-3),'2nd pass: mol %s (Ipc_avg=%f) should have Ipc_avg=%f'%(smi,Ipc_avg,res1) assert feq(Ipc,res2,1e-3),'2nd pass: mol %s (Ipc=%f) should have Ipc=%f'%(smi,Ipc,res2) def _testIpcLong(self): """ test calculation of Ipc """ col = 4 self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.Ipc) def _testIpcLong2(self): """ test calculation of Ipc """ col = 4 self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.Ipc) def testKappa1(self): """ test calculation of the Hall-Kier kappa1 value corrected data from Tables 3 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991) """ data = [('C12CC2C3CC13',2.344), ('C1CCC12CC2',3.061), ('C1CCCCC1',4.167), ('CCCCCC',6.000), ('CCC(C)C1CCC(C)CC1',9.091), ('CC(C)CC1CCC(C)CC1',9.091), ('CC(C)C1CCC(C)CCC1',9.091) ] for smi,res in data: m = Chem.MolFromSmiles(smi) kappa = GraphDescriptors.Kappa1(m) assert feq(kappa,res,1e-3),'mol %s (kappa1=%f) should have kappa1=%f'%(smi,kappa,res) def testKappa2(self): """ test calculation of the Hall-Kier kappa2 value corrected data from Tables 5 and 6 of Rev. Comp. Chem. vol 2, 367-422, (1991) """ data = [ ('[C+2](C)(C)(C)(C)(C)C',0.667), ('[C+](C)(C)(C)(C)(CC)',1.240), ('C(C)(C)(C)(CCC)',2.3444), ('CC(C)CCCC',4.167), ('CCCCCCC',6.000), ('CCCCCC',5.000), ('CCCCCCC',6.000), ('C1CCCC1',1.440), ('C1CCCC1C',1.633), ('C1CCCCC1',2.222), ('C1CCCCCC1',3.061), ('CCCCC',4.00), ('CC=CCCC',4.740), ('C1=CN=CN1',0.884), ('c1ccccc1',1.606), ('c1cnccc1',1.552), ('n1ccncc1',1.500), ('CCCCF',3.930), ('CCCCCl',4.290), ('CCCCBr',4.480), ('CCC(C)C1CCC(C)CC1',4.133), ('CC(C)CC1CCC(C)CC1',4.133), ('CC(C)C1CCC(C)CCC1',4.133) ] for smi,res in data: #print smi m = Chem.MolFromSmiles(smi) kappa = GraphDescriptors.Kappa2(m) assert feq(kappa,res,1e-3),'mol %s (kappa2=%f) should have kappa2=%f'%(smi,kappa,res) def testKappa3(self): """ test calculation of the Hall-Kier kappa3 value corrected data from Tables 3 and 6 of Rev. Comp. Chem. 
vol 2, 367-422, (1991) """ data = [ ('C[C+](C)(C)(C)C(C)(C)C',2.000), ('CCC(C)C(C)(C)(CC)',2.380), ('CCC(C)CC(C)CC',4.500), ('CC(C)CCC(C)CC',5.878), ('CC(C)CCCC(C)C',8.000), ('CCC(C)C1CCC(C)CC1',2.500), ('CC(C)CC1CCC(C)CC1',3.265), ('CC(C)C1CCC(C)CCC1',2.844) ] for smi,res in data: m = Chem.MolFromSmiles(smi) kappa = GraphDescriptors.Kappa3(m) assert feq(kappa,res,1e-3),'mol %s (kappa3=%f) should have kappa3=%f'%(smi,kappa,res) def testKappa3Long(self): """ test calculation of kappa3 """ col = 5 self.__testDesc('PP_descrs_regress.csv',col,GraphDescriptors.Kappa3) def _testKappa3Long2(self): """ test calculation of kappa3 """ col = 5 self.__testDesc('PP_descrs_regress.2.csv',col,GraphDescriptors.Kappa3) def _testLabuteASALong(self): """ test calculation of Labute's ASA value """ col = 6 self.__testDesc('PP_descrs_regress.csv',col,lambda x:MolSurf.LabuteASA(x,includeHs=1)) def _testLabuteASALong2(self): """ test calculation of Labute's ASA value """ col = 6 self.__testDesc('PP_descrs_regress.2.csv',col,lambda x:MolSurf.LabuteASA(x,includeHs=1)) def _testTPSAShortNCI(self): " Short TPSA test " inName = RDConfig.RDDataDir+'/NCI/first_200.tpsa.csv' with open(inName,'r') as inF: lines = inF.readlines() for line in lines: if line[0] != '#': line.strip() smi,ans = line.split(',') ans = float(ans) mol = Chem.MolFromSmiles(smi) calc = MolSurf.TPSA(mol) assert feq(calc,ans),'bad TPSA for SMILES %s (%.2f != %.2f)'%(smi,calc,ans) def _testTPSALongNCI(self): " Long TPSA test " fileN = 'tpsa_regr.csv' with open(os.path.join(RDConfig.RDCodeDir,'Chem','test_data',fileN),'r') as inF: lines = inF.readlines() lineNo = 0 for line in lines: lineNo+=1 if line[0] != '#': line.strip() smi,ans = line.split(',') ans = float(ans) mol = Chem.MolFromSmiles(smi) assert mol,"line %d, failed for smiles: %s"%(lineNo,smi) calc = MolSurf.TPSA(mol) assert feq(calc,ans),'line %d: bad TPSA for SMILES %s (%.2f != %.2f)'%(lineNo,smi,calc,ans) def testTPSALong(self): """ test calculation of TPSA """ col = 28 self.__testDesc('PP_descrs_regress.csv',col,MolSurf.TPSA) def _testTPSALong2(self): """ test calculation of TPSA """ col = 28 self.__testDesc('PP_descrs_regress.2.csv',col,MolSurf.TPSA) def _testLipinskiLong(self): """ test calculation of Lipinski params """ fName = 'PP_descrs_regress.csv' # we can't do H Acceptors for these pyridine-containing molecules # because the values will be wrong for EVERY one. 
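    # (most likely because the reference data and Lipinski.NumHAcceptors
    # disagree on whether the pyridine-type aromatic nitrogen counts as an
    # acceptor, so every row with such a ring would fail)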
#col = 29 #self.__testDesc(fName,col,Lipinski.NumHAcceptors) col = 30 self.__testDesc(fName,col,Lipinski.NumHDonors) col = 31 self.__testDesc(fName,col,Lipinski.NumHeteroatoms) col = 32 self.__testDesc(fName,col,Lipinski.NumRotatableBonds) def _testHAcceptorsLong(self): """ test calculation of Lipinski params """ fName = 'Block_regress.Lip.csv' col = 1 self.__testDesc(fName,col,Lipinski.NumHAcceptors) def _testHDonorsLong(self): """ test calculation of Lipinski params """ fName = 'Block_regress.Lip.csv' col = 2 self.__testDesc(fName,col,Lipinski.NumHDonors) def _testHeterosLong(self): """ test calculation of Lipinski params """ fName = 'Block_regress.Lip.csv' col = 3 self.__testDesc(fName,col,Lipinski.NumHeteroatoms) def _testRotBondsLong(self): """ test calculation of Lipinski params """ fName = 'Block_regress.Lip.csv' col = 4 self.__testDesc(fName,col,Lipinski.NumRotatableBonds) def _testLogPLong(self): """ test calculation of Lipinski params """ fName = 'PP_descrs_regress.csv' col = 33 self.__testDesc(fName,col,lambda x:Crippen.MolLogP(x,includeHs=1)) def _testLogPLong2(self): """ test calculation of Lipinski params """ fName = 'PP_descrs_regress.2.csv' col = 33 self.__testDesc(fName,col,lambda x:Crippen.MolLogP(x,includeHs=1)) def _testMOELong(self): """ test calculation of MOE-type descriptors """ fName = 'PP_descrs_regress.VSA.csv' col = 1 self.__testDesc(fName,col,MolSurf.SMR_VSA1) col = 2 self.__testDesc(fName,col,MolSurf.SMR_VSA10) col = 3 self.__testDesc(fName,col,MolSurf.SMR_VSA2) col = 4 self.__testDesc(fName,col,MolSurf.SMR_VSA3) col = 5 self.__testDesc(fName,col,MolSurf.SMR_VSA4) col = 6 self.__testDesc(fName,col,MolSurf.SMR_VSA5) col = 7 self.__testDesc(fName,col,MolSurf.SMR_VSA6) col = 8 self.__testDesc(fName,col,MolSurf.SMR_VSA7) col = 9 self.__testDesc(fName,col,MolSurf.SMR_VSA8) col = 10 self.__testDesc(fName,col,MolSurf.SMR_VSA9) col = 11 self.__testDesc(fName,col,MolSurf.SlogP_VSA1) col = 12 self.__testDesc(fName,col,MolSurf.SlogP_VSA10) col = 13 self.__testDesc(fName,col,MolSurf.SlogP_VSA11) col = 14 self.__testDesc(fName,col,MolSurf.SlogP_VSA12) def _testMOELong2(self): """ test calculation of MOE-type descriptors """ fName = 'PP_descrs_regress.VSA.2.csv' col = 1 self.__testDesc(fName,col,MolSurf.SMR_VSA1) col = 2 self.__testDesc(fName,col,MolSurf.SMR_VSA10) col = 11 self.__testDesc(fName,col,MolSurf.SlogP_VSA1) col = 12 self.__testDesc(fName,col,MolSurf.SlogP_VSA10) col = 13 self.__testDesc(fName,col,MolSurf.SlogP_VSA11) col = 14 self.__testDesc(fName,col,MolSurf.SlogP_VSA12) def testBalabanJ(self): """ test calculation of the Balaban J value J values are from Balaban's paper and have had roundoff errors and typos corrected. 
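    For reference, J = (m / (mu + 1)) * sum over all bonds (s_i * s_j)**-0.5,
    where m is the number of bonds, mu is the cyclomatic number and s_i is
    the distance-matrix row sum for atom i.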
""" data = [# alkanes ('CC',1.0),('CCC',1.6330), ('CCCC',1.9747),('CC(C)C',2.3238), ('CCCCC',2.1906),('CC(C)CC',2.5396),('CC(C)(C)C',3.0237), ('CCCCCC',2.3391),('CC(C)CCC',2.6272),('CCC(C)CC',2.7542),('CC(C)(C)CC',3.1685), ('CC(C)C(C)C',2.9935), # cycloalkanes ('C1CCCCC1',2.0000), ('C1C(C)CCCC1',2.1229), ('C1C(CC)CCCC1',2.1250), ('C1C(C)C(C)CCC1',2.2794), ('C1C(C)CC(C)CC1',2.2307), ('C1C(C)CCC(C)C1',2.1924), ('C1C(CCC)CCCC1',2.0779), ('C1C(C(C)C)CCCC1',2.2284), ('C1C(CC)C(C)CCC1',2.2973), ('C1C(CC)CC(C)CC1',2.2317), ('C1C(CC)CCC(C)C1',2.1804), ('C1C(C)C(C)C(C)CC1',2.4133), ('C1C(C)C(C)CC(C)C1',2.3462), ('C1C(C)CC(C)CC1(C)',2.3409), # aromatics ('c1ccccc1',3.0000), ('c1c(C)cccc1',3.0215), ('c1c(CC)cccc1',2.8321), ('c1c(C)c(C)ccc1',3.1349), ('c1c(C)cc(C)cc1',3.0777), ('c1c(C)ccc(C)c1',3.0325), ('c1c(CCC)cccc1',2.6149), ('c1c(C(C)C)cccc1',2.8483), ('c1c(CC)c(C)ccc1',3.0065), ('c1c(CC)cc(C)cc1',2.9369), ('c1c(CC)ccc(C)c1',2.8816), ('c1c(C)c(C)c(C)cc1',3.2478), ('c1c(C)c(C)cc(C)c1',3.1717), ('c1c(C)cc(C)cc1(C)',3.1657) ] for smi,res in data: m = Chem.MolFromSmiles(smi) j = GraphDescriptors.BalabanJ(m,forceDMat=1) assert feq(j,res),'mol %s (J=%f) should have J=%f'%(smi,j,res) j = GraphDescriptors.BalabanJ(m) assert feq(j,res),'second pass: mol %s (J=%f) should have J=%f'%(smi,j,res) def _testBalabanJLong(self): """ test calculation of the balaban j value """ fName = 'PP_descrs_regress.rest.2.csv' col = 1 self.__testDesc(fName,col,GraphDescriptors.BalabanJ) def _testKappa1Long(self): """ test calculation of kappa1 """ fName = 'PP_descrs_regress.rest.2.csv' col = 31 self.__testDesc(fName,col,GraphDescriptors.Kappa1) def _testKappa2Long(self): """ test calculation of kappa2 """ fName = 'PP_descrs_regress.rest.2.csv' col = 32 self.__testDesc(fName,col,GraphDescriptors.Kappa2) def _testChi0Long(self): fName = 'PP_descrs_regress.rest.2.csv' col = 5 self.__testDesc(fName,col,GraphDescriptors.Chi0) def _testChi1Long(self): fName = 'PP_descrs_regress.rest.2.csv' col = 8 self.__testDesc(fName,col,GraphDescriptors.Chi1) def _testChi0v(self): """ test calculation of Chi0v """ data = [('CCCCCC',4.828),('CCC(C)CC',4.992),('CC(C)CCC',4.992), ('CC(C)C(C)C',5.155),('CC(C)(C)CC',5.207), ('CCCCCO',4.276),('CCC(O)CC',4.439),('CC(O)(C)CC',4.654),('c1ccccc1O',3.834), ('CCCl',2.841),('CCBr',3.671),('CCI',4.242)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi0v(m) assert feq(chi,res,1e-3),'mol %s (Chi0v=%f) should have Chi0V=%f'%(smi,chi,res) def _testChi0vLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 7 self.__testDesc(fName,col,GraphDescriptors.Chi0v) def testChi1v(self): """ test calculation of Chi1v """ data = [('CCCCCC',2.914),('CCC(C)CC',2.808),('CC(C)CCC',2.770), ('CC(C)C(C)C',2.643),('CC(C)(C)CC',2.561), ('CCCCCO',2.523),('CCC(O)CC',2.489),('CC(O)(C)CC',2.284),('c1ccccc1O',2.134)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi1v(m) assert feq(chi,res,1e-3),'mol %s (Chi1v=%f) should have Chi1V=%f'%(smi,chi,res) def _testChi1vLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 10 self.__testDesc(fName,col,GraphDescriptors.Chi1v) def testPathCounts(self): """ FIX: this should be in some other file """ data = [('CCCCCC',(6,5,4,3,2,1)), ('CCC(C)CC',(6,5,5,4,1,0)), ('CC(C)CCC',(6,5,5,3,2,0)), ('CC(C)C(C)C',(6,5,6,4,0,0)), ('CC(C)(C)CC',(6,5,7,3,0,0)), ('CCCCCO',(6,5,4,3,2,1)), ('CCC(O)CC',(6,5,5,4,1,0)), ('CC(O)(C)CC',(6,5,7,3,0,0)), ('c1ccccc1O',(7,7,8,8,8,8)), ] for smi,res in data: m = Chem.MolFromSmiles(smi) for i in range(1,6): cnt = 
len(Chem.FindAllPathsOfLengthN(m,i,useBonds=1)) assert cnt==res[i],(smi,i,cnt,res[i],Chem.FindAllPathsOfLengthN(m,i,useBonds=1)) cnt = len(Chem.FindAllPathsOfLengthN(m,i+1,useBonds=0)) assert cnt==res[i],(smi,i,cnt,res[i],Chem.FindAllPathsOfLengthN(m,i+1,useBonds=1)) def testChi2v(self): """ test calculation of Chi2v """ data = [('CCCCCC',1.707),('CCC(C)CC',1.922),('CC(C)CCC',2.183), ('CC(C)C(C)C',2.488),('CC(C)(C)CC',2.914), ('CCCCCO',1.431),('CCC(O)CC',1.470),('CC(O)(C)CC',2.166),('c1ccccc1O',1.336), ] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi2v(m) assert feq(chi,res,1e-3),'mol %s (Chi2v=%f) should have Chi2V=%f'%(smi,chi,res) def _testChi2vLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 12 self.__testDesc(fName,col,GraphDescriptors.Chi2v) def testChi3v(self): """ test calculation of Chi3v """ data = [('CCCCCC',0.957),('CCC(C)CC',1.394),('CC(C)CCC',0.866),('CC(C)C(C)C',1.333),('CC(C)(C)CC',1.061), ('CCCCCO',0.762),('CCC(O)CC',0.943),('CC(O)(C)CC',0.865),('c1ccccc1O',0.756)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi3v(m) assert feq(chi,res,1e-3),'mol %s (Chi3v=%f) should have Chi3V=%f'%(smi,chi,res) def _testChi3vLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 14 self.__testDesc(fName,col,GraphDescriptors.Chi3v) def testChi4v(self): """ test calculation of Chi4v """ data = [('CCCCCC',0.500),('CCC(C)CC',0.289),('CC(C)CCC',0.577), ('CC(C)C(C)C',0.000),('CC(C)(C)CC',0.000), ('CCCCCO',0.362),('CCC(O)CC',0.289),('CC(O)(C)CC',0.000),('c1ccccc1O',0.428)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi4v(m) assert feq(chi,res,1e-3),'mol %s (Chi4v=%f) should have Chi4V=%f'%(smi,chi,res) def testChi5v(self): """ test calculation of Chi5v """ data = [('CCCCCC',0.250),('CCC(C)CC',0.000),('CC(C)CCC',0.000), ('CC(C)C(C)C',0.000),('CC(C)(C)CC',0.000), ('CCCCCO',0.112),('CCC(O)CC',0.000),('CC(O)(C)CC',0.000),('c1ccccc1O',0.242)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.ChiNv_(m,5) assert feq(chi,res,1e-3),'mol %s (Chi5v=%f) should have Chi5V=%f'%(smi,chi,res) def testChi0n(self): """ test calculation of Chi0n """ data = [('CCCCCC',4.828),('CCC(C)CC',4.992),('CC(C)CCC',4.992), ('CC(C)C(C)C',5.155),('CC(C)(C)CC',5.207), ('CCCCCO',4.276),('CCC(O)CC',4.439),('CC(O)(C)CC',4.654),('c1ccccc1O',3.834), ('CCCl',2.085),('CCBr',2.085),('CCI',2.085),] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi0n(m) assert feq(chi,res,1e-3),'mol %s (Chi0n=%f) should have Chi0n=%f'%(smi,chi,res) def _testChi0nLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 6 self.__testDesc(fName,col,GraphDescriptors.Chi0n) def testChi1n(self): """ test calculation of Chi1n """ data = [('CCCCCC',2.914),('CCC(C)CC',2.808),('CC(C)CCC',2.770), ('CC(C)C(C)C',2.643),('CC(C)(C)CC',2.561), ('CCCCCO',2.523),('CCC(O)CC',2.489),('CC(O)(C)CC',2.284),('c1ccccc1O',2.134)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi1n(m) assert feq(chi,res,1e-3),'mol %s (Chi1n=%f) should have Chi1N=%f'%(smi,chi,res) def _testChi1nLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 9 self.__testDesc(fName,col,GraphDescriptors.Chi1n) def testChi2n(self): """ test calculation of Chi2n """ data = [('CCCCCC',1.707),('CCC(C)CC',1.922),('CC(C)CCC',2.183), ('CC(C)C(C)C',2.488),('CC(C)(C)CC',2.914), ('CCCCCO',1.431),('CCC(O)CC',1.470),('CC(O)(C)CC',2.166),('c1ccccc1O',1.336)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi2n(m) assert 
feq(chi,res,1e-3),'mol %s (Chi2n=%f) should have Chi2N=%f'%(smi,chi,res) def _testChi2nLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 11 self.__testDesc(fName,col,GraphDescriptors.Chi2n) def testChi3n(self): """ test calculation of Chi3n """ data = [('CCCCCC',0.957),('CCC(C)CC',1.394),('CC(C)CCC',0.866),('CC(C)C(C)C',1.333),('CC(C)(C)CC',1.061), ('CCCCCO',0.762),('CCC(O)CC',0.943),('CC(O)(C)CC',0.865),('c1ccccc1O',0.756)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi3n(m) assert feq(chi,res,1e-3),'mol %s (Chi3n=%f) should have Chi3N=%f'%(smi,chi,res) def _testChi3nLong(self): fName = 'PP_descrs_regress.rest.2.csv' col = 13 self.__testDesc(fName,col,GraphDescriptors.Chi3n) def testChi4n(self): """ test calculation of Chi4n """ data = [('CCCCCC',0.500),('CCC(C)CC',0.289),('CC(C)CCC',0.577), ('CC(C)C(C)C',0.000),('CC(C)(C)CC',0.000), ('CCCCCO',0.362),('CCC(O)CC',0.289),('CC(O)(C)CC',0.000),('c1ccccc1O',0.428)] for smi,res in data: m = Chem.MolFromSmiles(smi) chi = GraphDescriptors.Chi4n(m) assert feq(chi,res,1e-3),'mol %s (Chi4n=%f) should have Chi4N=%f'%(smi,chi,res) def testIssue125(self): """ test an issue with calculating BalabanJ """ smi = 'O=C(OC)C1=C(C)NC(C)=C(C(OC)=O)C1C2=CC=CC=C2[N+]([O-])=O' m1 = Chem.MolFromSmiles(smi) m2 = Chem.MolFromSmiles(smi) Chem.MolToSmiles(m1) j1=GraphDescriptors.BalabanJ(m1) j2=GraphDescriptors.BalabanJ(m2) assert feq(j1,j2) def testOrderDepend(self): """ test order dependence of some descriptors: """ data = [('C=CC=C',21.01955,2.73205), ('O=CC=O',25.01955,2.73205), ('FCC(=O)CF',46.7548875,2.98816), ('O=C1C=CC(=O)C=C1',148.705216,2.8265), ('C12C(F)=C(O)C(F)C1C(F)=C(O)C(F)2',315.250442,2.4509), ('C12CC=CCC1C(=O)C3CC=CCC3C(=O)2',321.539522,1.95986)] for smi,CT,bal in data: m = Chem.MolFromSmiles(smi) newBal = GraphDescriptors.BalabanJ(m, forceDMat = 1) assert feq(newBal,bal,1e-4),'mol %s %f!=%f'%(smi,newBal,bal) m = Chem.MolFromSmiles(smi) newCT = GraphDescriptors.BertzCT(m, forceDMat = 1) assert feq(newCT,CT,1e-4),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT) m = Chem.MolFromSmiles(smi) newCT = GraphDescriptors.BertzCT(m, forceDMat = 1) assert feq(newCT,CT,1e-4),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT) newBal = GraphDescriptors.BalabanJ(m, forceDMat = 1) assert feq(newBal,bal,1e-4),'mol %s %f!=%f'%(smi,newBal,bal) m = Chem.MolFromSmiles(smi) newBal = GraphDescriptors.BalabanJ(m, forceDMat = 1) assert feq(newBal,bal,1e-4),'mol %s %f!=%f'%(smi,newBal,bal) newCT = GraphDescriptors.BertzCT(m, forceDMat = 1) assert feq(newCT,CT,1e-4),'mol %s (CT calc = %f) should have CT = %f'%(smi,newCT,CT) if __name__ == '__main__': import sys,getopt,re doLong=0 if len(sys.argv) >1: args,extras=getopt.getopt(sys.argv[1:],'l') for arg,val in args: if arg=='-l': doLong=1 sys.argv.remove('-l') if doLong: for methName in dir(TestCase): if re.match('_test',methName): newName = re.sub('_test','test',methName) exec('TestCase.%s = TestCase.%s'%(newName,methName)) unittest.main() from __future__ import print_function, division import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! 
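# A quick aside on the ordering above (illustrative sketch only, standard
# matplotlib behaviour of this era): pyplot binds to whatever backend is
# active the first time it is imported, so matplotlib.use('Agg') must run
# before any pyplot/pylab import anywhere in the process, e.g.:
#
#     import matplotlib
#     matplotlib.use('Agg')              # 1. pick the non-GUI backend first
#     import matplotlib.pyplot as plt    # 2. only now import pyplot
#     fig, ax = plt.subplots()
#     ax.plot([0, 1, 2], [0, 1, 4])
#     fig.savefig('smoke_test.png')      # renders with no display server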
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer from lasagne.nonlinearities import sigmoid, rectify from lasagne.objectives import crossentropy, mse from lasagne.init import Uniform, Normal from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer from lasagne.updates import adagrad, nesterov_momentum from functools import partial import os from neuralnilm.source import standardise from neuralnilm.experiment import run_experiment from neuralnilm.net import TrainingError import __main__ NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" SAVE_PLOT_INTERVAL = 250 GRADIENT_STEPS = 100 """ e103 Discovered that bottom layer is hardly changing. So will try just a single lstm layer e104 standard init lower learning rate e106 lower learning rate to 0.001 e108 is e107 but with batch size of 5 e109 Normal(1) for LSTM e110 * Back to Uniform(5) for LSTM * Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f RESULTS: Seems to run fine again! e111 * Try with nntools head * peepholes=False RESULTS: appears to be working well. Haven't seen a NaN, even with training rate of 0.1 e112 * n_seq_per_batch = 50 e114 * Trying looking at layer by layer training again. * Start with single LSTM layer e115 * Learning rate = 1 e116 * Standard inits e117 * Uniform(1) init e119 * Learning rate 10 # Result: didn't work well! e120 * init: Normal(1) * not as good as Uniform(5) e121 * Uniform(25) e122 * Just 10 cells * Uniform(5) e125 * Pre-train lower layers e128 * Add back all 5 appliances * Seq length 1500 * skip_prob = 0.7 e129 * max_input_power = None * 2nd layer has Uniform(5) * pre-train bottom layer for 2000 epochs * add third layer at 4000 epochs """ def exp_a(name): source = RealApplianceSource( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television', 'dish washer', ['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200, 2500, 2400], on_power_thresholds=[20, 20, 20, 20, 20], max_input_power=None, min_on_durations=[60, 60, 60, 1800, 1800], min_off_durations=[12, 12, 12, 1800, 600], window=("2013-06-01", "2014-07-01"), seq_length=1500, output_one_appliance=False, boolean_targets=False, train_buildings=[1], validation_buildings=[1], skip_probability=0.7, n_seq_per_batch=50 ) net = Net( experiment_name=name, source=source, save_plot_interval=SAVE_PLOT_INTERVAL, loss_function=crossentropy, updates=partial(nesterov_momentum, learning_rate=1.0), layers_config=[ { 'type': BLSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(25), 'gradient_steps': GRADIENT_STEPS, 'peepholes': False }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': sigmoid } ], layer_changes={ 2001: { 'remove_from': -3, 'new_layers': [ { 'type': BLSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(5), 'gradient_steps': GRADIENT_STEPS, 'peepholes': False }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': sigmoid } ] }, 4001: { 'remove_from': -3, 'new_layers': [ { 'type': BLSTMLayer, 'num_units': 50, 'W_in_to_cell': Uniform(5), 'gradient_steps': GRADIENT_STEPS, 'peepholes': False }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': sigmoid } ] } } ) return net def init_experiment(experiment): full_exp_name = NAME + experiment func_call = 'exp_{:s}(full_exp_name)'.format(experiment) print("***********************************") print("Preparing", full_exp_name, "...") net = 
eval(func_call) return net def main(): for experiment in list('a'): full_exp_name = NAME + experiment path = os.path.join(PATH, full_exp_name) try: net = init_experiment(experiment) run_experiment(net, path, epochs=5000) except KeyboardInterrupt: break except TrainingError as e: print("EXCEPTION:", e) if __name__ == "__main__": main() from django.http import HttpResponse, Http404 from django.template import loader from django.contrib.sites.models import get_current_site from django.core import urlresolvers from django.core.paginator import EmptyPage, PageNotAnInteger from django.contrib.gis.db.models.fields import GeometryField from django.db import connections, DEFAULT_DB_ALIAS from django.db.models import get_model from django.utils.encoding import smart_str from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz def index(request, sitemaps): """ This view generates a sitemap index that uses the proper view for resolving geographic section sitemap URLs. """ current_site = get_current_site(request) sites = [] protocol = request.is_secure() and 'https' or 'http' for section, site in sitemaps.items(): if callable(site): pages = site().paginator.num_pages else: pages = site.paginator.num_pages sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section}) sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url)) if pages > 1: for page in range(2, pages+1): sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page)) xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites}) return HttpResponse(xml, mimetype='application/xml') def sitemap(request, sitemaps, section=None): """ This view generates a sitemap with additional geographic elements defined by Google. """ maps, urls = [], [] if section is not None: if section not in sitemaps: raise Http404("No sitemap available for section: %r" % section) maps.append(sitemaps[section]) else: maps = sitemaps.values() page = request.GET.get("p", 1) current_site = get_current_site(request) for site in maps: try: if callable(site): urls.extend(site().get_urls(page=page, site=current_site)) else: urls.extend(site.get_urls(page=page, site=current_site)) except EmptyPage: raise Http404("Page %s empty" % page) except PageNotAnInteger: raise Http404("No page '%s'" % page) xml = smart_str(loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls})) return HttpResponse(xml, mimetype='application/xml') def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS): """ This view generates KML for the given app label, model, and field name. The model's default manager must be GeoManager, and the field name must be that of a geographic field. """ placemarks = [] klass = get_model(label, model) if not klass: raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model)) if field_name: try: info = klass._meta.get_field_by_name(field_name) if not isinstance(info[0], GeometryField): raise Exception except: raise Http404('Invalid geometry field.') connection = connections[using] if connection.ops.postgis: # PostGIS will take care of transformation. placemarks = klass._default_manager.using(using).kml(field_name=field_name) else: # There's no KML method on Oracle or MySQL, so we use the `kml` # attribute of the lazy geometry instead. 
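        # Sketch of what the fallback below relies on (the model `City` and
        # the field `point` are hypothetical): every GeoDjango geometry object
        # exposes a `.kml` property, so the KML fragment can be built in
        # Python, whereas PostGIS produces it server-side in SQL (ST_AsKML):
        #
        #     qs = City.objects.transform(4326, field_name='point')
        #     qs[0].point.kml  # -> '<Point><coordinates>...</coordinates></Point>'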
placemarks = [] if connection.ops.oracle: qs = klass._default_manager.using(using).transform(4326, field_name=field_name) else: qs = klass._default_manager.using(using).all() for mod in qs: setattr(mod, 'kml', getattr(mod, field_name).kml) placemarks.append(mod) # Getting the render function and rendering to the correct. if compress: render = render_to_kmz else: render = render_to_kml return render('gis/kml/placemarks.kml', {'places' : placemarks}) def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS): """ This view returns KMZ for the given app label, model, and field name. """ return kml(request, label, model, field_name, compress=True, using=using) #!/usr/bin/env python import sys import getpass import datetime # PyGithub >= 1.13 is required https://pypi.python.org/pypi/PyGithub from github import Github from github_issues import GithubIssuesBase, GithubIssues # You could use OAuth here too for unattended access # see http://developer.github.com/v3/oauth/#create-a-new-authorization print "Enter github username:" username = sys.stdin.readline().strip() print password = getpass.getpass('Enter github password: ') gh = Github(login_or_token=username, password=password, user_agent='PyGithub/Python') # needed to fetch fresh rate_limiting data repo = gh.get_repo('avocado-framework/avocado-vt') # Requests for logged in users are limited to 5000 per hour # or about 1 request every 0.7 seconds start = gh.rate_limiting # Open up cache and repository issues = GithubIssues(gh, 'avocado-framework/avocado-vt') print "Issue #125: ", # Any issue can be referenced by number print issues[125] end = gh.rate_limiting print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Pull requests are treated as issues issues = GithubIssues(gh, 'avocado-framework/avocado-vt') start = end print "Pull #526: ", print issues[526] end = gh.rate_limiting print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Listing issues requires finding the last issue # this takes a while when the cache is empty issues = GithubIssues(gh, 'avocado-framework/avocado-vt') start = end print "Total number of issues (this could take a while):" # This len() is used to force the slower binary-search print GithubIssuesBase.__len__(issues) end = gh.rate_limiting print "Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) # Searches are supported and return lists of issue-numbers issues = GithubIssues(gh, 'avocado-framework/avocado-vt') start = end print "Open issues last few days without any label (could take 2-10 minutes):" two_days = datetime.timedelta(days=2) last_week = datetime.datetime.now() - two_days # Search criteria is put into a dictionary # state - str - 'open', 'closed' # assignee - list of str (login), "none" or "*" # mentioned - str (login) # labels - list of str (label name) # sort - str - 'created', 'updated', 'comments' # direction - str - 'asc', 'desc' # since - datetime.datetime criteria = {'state': 'open', 'since': last_week} # Search results are cached for 10-minutes, otherwise searches can be slow for number in issues.search(criteria): issue = issues[number] # some items must be searched/compared manually if len(issue['labels']) < 1: print ('https://github.com/avocado-framework/avocado-vt/issues/%s\t"%s"' % (issue['number'], issue['summary'])) print print "Requests used: ", start[0] - end[0] print "Cache hits: %s 
misses: %s" % (issues.cache_hits, issues.cache_misses) # Now that cache is populated, this will be very fast issues = GithubIssues(gh, 'avocado-framework/avocado-vt') start = end print "Total number of issues (this should be a lot faster):" # This length uses a cached issue count plus a 'since' criteria search print len(issues) end = gh.rate_limiting print "Final Requests used: ", start[0] - end[0] print "Cache hits: %s misses: %s" % (issues.cache_hits, issues.cache_misses) del issues # -*- encoding: utf-8 -*- ############################################################################## # # Author: Fekete Mihai , Tatár Attila # Copyright (C) 2011-2014 TOTAL PC SYSTEMS (http://www.erpsystems.ro). # Copyright (C) 2014 Fekete Mihai # Copyright (C) 2014 Tatár Attila # Based on precedent versions developed by Fil System, Fekete Mihai # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ############################################################################## import res_partner # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: class CheckUser: """ A class to perform various ban checks on a User object. The checks will be done, against the different ban lists and other ban rules in the config file. If a test is True, then the user will be banned. """ def __init__(self, tinybot, user, conf): """ Initialize the CheckUser class. :param tinybot: An instance of TinychatBot. :type tinybot: TinychatBot :param user: The User object to check. :type user: User :param conf: The config file. :type conf: config """ self.tinybot = tinybot self.user = user self.config = conf def check_account(self): """ Check if the user account is in the account bans list. :return: True, if the user was banned. :rtype: bool """ if self.user.account in self.config.B_ACCOUNT_BANS: if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_kick_msg(self.user.id) else: self.tinybot.send_ban_msg(self.user.id) if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_chat_msg('Auto-Kicked: (bad account)') else: self.tinybot.send_chat_msg('Auto-Banned: (bad account)') return True return False def guest_entry(self): """ Check if the user is a guest, and allowed to join. :return: True, if the user was banned. :rtype: bool """ if self.user.account == '' and not self.config.B_ALLOW_GUESTS: if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_kick_msg(self.user.id) else: self.tinybot.send_ban_msg(self.user.id) if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_chat_msg('Auto-Kicked: (guests not allowed)') else: self.tinybot.send_chat_msg('Auto-Banned: (guests not allowed)') return True return False def check_nick(self): """ Check if the user's nick is in the nick bans list. :return: True, if the user was banned. 
:rtype: bool """ if self.user.nick in self.config.B_NICK_BANS: if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_kick_msg(self.user.id) else: self.tinybot.send_ban_msg(self.user.id) if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_chat_msg('Auto-Kicked: (bad nick)') else: self.tinybot.send_chat_msg('Auto-Banned: (bad nick)') return True return False def check_lurker(self): """ Check if the user is a lurker, and allowed to join. :return: True, if the user was banned. :rtype: bool """ if self.user.is_lurker and not self.config.B_ALLOW_LURKERS: if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_kick_msg(self.user.id) else: self.tinybot.send_ban_msg(self.user.id) if self.config.B_USE_KICK_AS_AUTOBAN: self.tinybot.send_chat_msg('Auto-Kicked: (lurkers not allowed)') else: self.tinybot.send_chat_msg('Auto-Banned: (lurkers not allowed)') return True return False ''' /******************************************************************************* * * Copyright (c) 2015 Fraunhofer FOKUS, All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3.0 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see . * * AUTHORS: Louay Bassbouss (louay.bassbouss@fokus.fraunhofer.de) * ******************************************************************************/ ''' from django.contrib.auth.models import User from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist from tastypie import fields from tastypie.utils import trailing_slash, dict_strip_unicode_keys from tastypie.http import HttpGone, HttpMultipleChoices from tastypie.authentication import Authentication from tastypie.authorization import Authorization from django.conf.urls.defaults import url from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS from tastypie.cache import SimpleCache import api.models class ReportResource(ModelResource): user = fields.ToOneField('api.tasty.UserResource', 'user', full=True) status = fields.ToOneField('api.tasty.StatusResource', 'status', full=True) category = fields.ToOneField('api.tasty.CategoryResource', 'category', full=True) address = fields.ToOneField('api.tasty.AddressResource', 'address', full=True) comments = fields.ToManyField('api.tasty.CommentResource', 'comment_set', related_name='report', blank=True) ratings = fields.ToManyField('api.tasty.RatingResource', 'rating_set', related_name='report', blank=True) photos = fields.ToManyField('api.tasty.PhotoResource', 'photo_set', related_name='report', blank=True, full=True) class Meta: resource_name = 'reports' queryset = api.models.Report.objects.all() authentication = Authentication() authorization = Authorization() list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get', 'post', 'put', 'delete'] # no 'patch' always_return_data = True # XXX #cache = SimpleCache() filtering = { 'user': ALL_WITH_RELATIONS, 'status': ALL_WITH_RELATIONS, 'photos': ALL_WITH_RELATIONS, 'tags': ['icontains'], 'address': ALL_WITH_RELATIONS, 'category': ALL_WITH_RELATIONS, 'creationTime': ['exact', 
'range', 'lt', 'lte', 'gte', 'gt'],
        }
        ordering = ['id', 'creationTime']

    def build_filters(self, filters={}):
        """Add custom filters (radius, onlyWithPhotos)"""
        orm_filters = super(ReportResource, self).build_filters(filters)
        # filter by photos
        photos = api.models.Photo.objects.all()
        try:
            if int(filters.get('onlyWithPhotos')) == 1:
                orm_filters['pk__in'] = set([p.report.pk for p in photos])
        except:
            # missing or malformed parameter: skip this filter
            pass
        # filter by distance
        try:
            lat = float(filters.get('latitude'))
            lng = float(filters.get('longitude'))
            rad = float(filters.get('radius'))
            reports = api.models.Report.objects.nearby(lat, lng, rad)
            pks = [r.pk for r in reports]
            try:
                orm_filters['pk__in'] = orm_filters['pk__in'].intersection(pks)
            except:
                orm_filters['pk__in'] = pks
        except:
            pass
        return orm_filters

    def obj_create(self, bundle, request=None, **kwargs):
        return super(ReportResource, self).obj_create(bundle, request,
                                                      user=request.user)

    def hydrate_address(self, bundle):
        resrc = AddressResource()
        addr = dict((k[5:], v) for k, v in bundle.data.iteritems()
                    if k.startswith('addr_'))
        geo = dict((k[4:], float(v)) for k, v in bundle.data.iteritems()
                   if k.startswith('geo_'))
        addr.update(geo)
        addrbundle = resrc.build_bundle(obj=api.models.Address,
                                        data=dict_strip_unicode_keys(addr))
        addrobj = resrc.obj_create(addrbundle).obj
        bundle.obj.address = addrobj
        return bundle

    def dehydrate(self, bundle):
        """ Calculate averageRating and append to response bundle. """
        total_score = 0.0
        ratings = api.models.Rating.objects.filter(report__id=bundle.obj.id)
        if not ratings.count():
            return bundle
        for rating in ratings:
            total_score += rating.value
        bundle.data['averageRating'] = total_score / ratings.count()
        return bundle

    def dehydrate_description(self, bundle):
        #return bundle.data['description'].upper()
        return bundle.data['description']  # do nothing

    # nested resources
    # ref: latest/cookbook.html#nested-resources
    def override_urls(self):
        return [
            url(r"^reports/(?P<pk>\d+)/(?P<nest_resource>\w+)%s$"
                % trailing_slash(),
                self.wrap_view('handle_nested'), name='api_handle_nested'),
        ]

    def handle_nested(self, request, **kwargs):
        resource_name = kwargs.pop('nest_resource')
        resource = self.fields[resource_name].to_class().__class__
        try:
            stripped_kwargs = self.remove_api_resource_names(kwargs)
            obj = self.cached_obj_get(request=request, **stripped_kwargs)
        except ObjectDoesNotExist:
            return HttpGone()
        except MultipleObjectsReturned:
            return HttpMultipleChoices('Multiple objects with this PK.')
        r = resource()
        if request.method.lower() == 'get':
            return r.get_list(request, report=obj.pk)
        elif request.method.lower() == 'post':
            cont_type = request.META.get('CONTENT_TYPE', 'application/json')
            deserialized = r.deserialize(request, format=cont_type)
            report_uri = ReportResource().get_resource_uri(obj)
            user_uri = UserResource().get_resource_uri(request.user)
            parms = {'report': report_uri, 'user': user_uri}
            if 'form' in cont_type:
                deserialized = dict(
                    (str(k), v[0] if (type(v) == list and len(v) > 0) else v)
                    for k, v in deserialized.iteritems())
            parms.update(deserialized)
            try:
                bundle = r.build_bundle(
                    data=dict_strip_unicode_keys(parms),
                    request=request
                )
                r.is_valid(bundle, request)
                r.obj_create(bundle)  # this creates the actual child
            except:
                raise ValueError(parms)
            bundle_dehyd = r.full_dehydrate(bundle)
            resp = r.create_response(request, bundle_dehyd)
            resp['location'] = r.get_resource_uri(bundle)
            resp.status_code = 201
            return resp
        else:
            raise NotImplementedError('In POST and GET we trust.')


class UserResource(ModelResource):
    class Meta:
        resource_name = 'users'
        queryset = User.objects.all()
        excludes =
['email', 'password', 'is_active', 'last_login', 'first_name', 'last_name', 'date_joined', 'is_staff', 'is_superuser'] allowed_methods = ['get'] #cache = SimpleCache() filtering = { 'username': ALL, } ordering = ['username', 'id'] class StatusResource(ModelResource): class Meta: resource_name = 'statuses' queryset = api.models.Status.objects.all() #cache = SimpleCache() class CategoryResource(ModelResource): class Meta: resource_name = 'categories' queryset = api.models.Category.objects.all() #cache = SimpleCache() class AddressResource(ModelResource): class Meta: queryset = api.models.Address.objects.all() excludes = ['id'] #cache = SimpleCache() class CommentResource(ModelResource): report = fields.ToOneField('api.tasty.ReportResource', 'report') user = fields.ToOneField(UserResource, 'user', full=True) newStatus = fields.ToOneField(StatusResource, 'newStatus', full=True, blank=True, null=True) class Meta: resource_name = 'comments' queryset = api.models.Comment.objects.all() authentication = Authentication() authorization = Authorization() #cache = SimpleCache() filtering = { 'report': ALL, 'user': ALL_WITH_RELATIONS, } ordering = ['creationTime', 'newStatus', 'user', 'report', 'id'] class RatingResource(ModelResource): report = fields.ToOneField('api.tasty.ReportResource', 'report') user = fields.ToOneField(UserResource, 'user', full=True) class Meta: resource_name = 'ratings' queryset = api.models.Rating.objects.all() #cache = SimpleCache() filtering = { 'report': ALL, 'user': ALL_WITH_RELATIONS, } class PhotoResource(ModelResource): report = fields.ToOneField('api.tasty.ReportResource', 'report') user = fields.ToOneField(UserResource, 'user') class Meta: resource_name = 'photos' queryset = api.models.Photo.objects.all() #cache = SimpleCache() filtering = { 'report': ALL, } # -*- coding: utf-8 -*- from __future__ import print_function from __future__ import unicode_literals from __future__ import division from django.core.management.base import BaseCommand from django.db import transaction from apps.core.models import Group from apps.survey.models import (Blockface, Territory, BlockfaceReservation) class Command(BaseCommand): """ Assign all expert_required blockfaces to a specified group Usage: ./manage.py assign_expert_blocks some-group-slug """ @transaction.atomic def handle(self, *args, **options): group_slug = args[0] group = Group.objects.get(slug=group_slug) print("Assigning expert blocks to %s" % group.name) already_assigned_ids = Territory.objects.filter(group=group)\ .values_list('blockface_id', flat=True) print("Skipping %d blocks already assigned" % already_assigned_ids.count()) new_expert_blocks = Blockface.objects\ .filter(expert_required=True)\ .exclude(id__in=already_assigned_ids) assigned_to_others = Territory.objects\ .filter(blockface__in=new_expert_blocks) print("Removing %d assignments to groups other than %s" % (assigned_to_others.count(), group.name)) assigned_to_others.delete() old_reservations =\ BlockfaceReservation.objects\ .filter(blockface__in=new_expert_blocks) print("Removing %d reservations on blocks that are being reassigned" % old_reservations.count()) old_reservations.delete() print("Assigning %d blocks to %s" % (new_expert_blocks.count(), group.name)) for blockface in new_expert_blocks: Territory.objects.create(group=group, blockface=blockface) """ tests.test_component_demo ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests demo component. 
""" import unittest import homeassistant.core as ha import homeassistant.components.automation as automation import homeassistant.components.automation.state as state from homeassistant.const import CONF_PLATFORM class TestAutomationState(unittest.TestCase): """ Test the event automation. """ def setUp(self): # pylint: disable=invalid-name self.hass = ha.HomeAssistant() self.hass.states.set('test.entity', 'hello') self.calls = [] def record_call(service): self.calls.append(service) self.hass.services.register('test', 'automation', record_call) def tearDown(self): # pylint: disable=invalid-name """ Stop down stuff we started. """ self.hass.stop() def test_setup_fails_if_no_entity_id(self): self.assertFalse(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', automation.CONF_SERVICE: 'test.automation' } })) def test_if_fires_on_entity_change(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_from_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_to_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_both_filters(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_not_fires_if_to_filter_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'moon') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_not_fires_if_from_filter_not_match(self): self.hass.states.set('test.entity', 'bye') self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.entity', state.CONF_FROM: 'hello', state.CONF_TO: 'world', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_not_fires_if_entity_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { CONF_PLATFORM: 'state', state.CONF_ENTITY_ID: 'test.another_entity', automation.CONF_SERVICE: 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) ######################## BEGIN LICENSE BLOCK 
######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import Big5DistributionAnalysis from .mbcssm import Big5SMModel class Big5Prober(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(Big5SMModel) self._mDistributionAnalyzer = Big5DistributionAnalysis() self.reset() def get_charset_name(self): return "Big5" # -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## import partner import invoice # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # MIT License. 
See license.txt from __future__ import unicode_literals # IMPORTANT: only import safe functions as this module will be included in jinja environment import frappe import operator import re, urllib, datetime, math import babel.dates # datetime functions def getdate(string_date): """ Coverts string date (yyyy-mm-dd) to datetime.date object """ if isinstance(string_date, datetime.date): return string_date elif isinstance(string_date, datetime.datetime): return string_date.date() if " " in string_date: string_date = string_date.split(" ")[0] return datetime.datetime.strptime(string_date, "%Y-%m-%d").date() def add_to_date(date, years=0, months=0, days=0): """Adds `days` to the given date""" format = isinstance(date, basestring) if date: date = getdate(date) else: raise Exception, "Start date required" from dateutil.relativedelta import relativedelta date += relativedelta(years=years, months=months, days=days) if format: return date.strftime("%Y-%m-%d") else: return date def add_days(date, days): return add_to_date(date, days=days) def add_months(date, months): return add_to_date(date, months=months) def add_years(date, years): return add_to_date(date, years=years) def date_diff(string_ed_date, string_st_date): return (getdate(string_ed_date) - getdate(string_st_date)).days def time_diff(string_ed_date, string_st_date): return get_datetime(string_ed_date) - get_datetime(string_st_date) def time_diff_in_seconds(string_ed_date, string_st_date): return time_diff(string_ed_date, string_st_date).total_seconds() def time_diff_in_hours(string_ed_date, string_st_date): return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6) def now_datetime(): return convert_utc_to_user_timezone(datetime.datetime.utcnow()) def get_user_time_zone(): if getattr(frappe.local, "user_time_zone", None) is None: frappe.local.user_time_zone = frappe.cache().get_value("time_zone") if not frappe.local.user_time_zone: frappe.local.user_time_zone = frappe.db.get_default('time_zone') or 'Asia/Calcutta' frappe.cache().set_value("time_zone", frappe.local.user_time_zone) return frappe.local.user_time_zone def convert_utc_to_user_timezone(utc_timestamp): from pytz import timezone, UnknownTimeZoneError utcnow = timezone('UTC').localize(utc_timestamp) try: return utcnow.astimezone(timezone(get_user_time_zone())) except UnknownTimeZoneError: return utcnow def now(): """return current datetime as yyyy-mm-dd hh:mm:ss""" if getattr(frappe.local, "current_date", None): return getdate(frappe.local.current_date).strftime("%Y-%m-%d") + " " + \ now_datetime().strftime('%H:%M:%S.%f') else: return now_datetime().strftime('%Y-%m-%d %H:%M:%S.%f') def nowdate(): """return current date as yyyy-mm-dd""" return now_datetime().strftime('%Y-%m-%d') def today(): return nowdate() def nowtime(): """return current time in hh:mm""" return now_datetime().strftime('%H:%M:%S.%f') def get_first_day(dt, d_years=0, d_months=0): """ Returns the first day of the month for the date specified by date object Also adds `d_years` and `d_months` if specified """ dt = getdate(dt) # d_years, d_months are "deltas" to apply to dt overflow_years, month = divmod(dt.month + d_months - 1, 12) year = dt.year + d_years + overflow_years return datetime.date(year, month + 1, 1) def get_last_day(dt): """ Returns last day of the month using: `get_first_day(dt, 0, 1) + datetime.timedelta(-1)` """ return get_first_day(dt, 0, 1) + datetime.timedelta(-1) def get_datetime(datetime_str): try: return datetime.datetime.strptime(datetime_str, '%Y-%m-%d 
%H:%M:%S.%f')
    except TypeError:
        if isinstance(datetime_str, datetime.datetime):
            return datetime_str.replace(tzinfo=None)
        else:
            raise
    except ValueError:
        if datetime_str == '0000-00-00 00:00:00.000000':
            return None
        return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')

def get_datetime_str(datetime_obj):
    if isinstance(datetime_obj, basestring):
        datetime_obj = get_datetime(datetime_obj)
    return datetime_obj.strftime('%Y-%m-%d %H:%M:%S.%f')

def formatdate(string_date=None, format_string=None):
    """
    Converts the given string date to :data:`user_format`.
    User format is specified in defaults.

    Examples:

    * dd-mm-yyyy
    * mm-dd-yyyy
    * dd/mm/yyyy
    """
    date = getdate(string_date) if string_date else now_datetime().date()
    if format_string:
        return babel.dates.format_date(date, format_string or "medium",
            locale=(frappe.local.lang or "").replace("-", "_"))
    else:
        if getattr(frappe.local, "user_format", None) is None:
            frappe.local.user_format = frappe.db.get_default("date_format")
        out = frappe.local.user_format or "yyyy-mm-dd"
        try:
            return out.replace("dd", date.strftime("%d"))\
                .replace("mm", date.strftime("%m"))\
                .replace("yyyy", date.strftime("%Y"))
        except ValueError, e:
            raise frappe.ValidationError, str(e)

def global_date_format(date):
    """returns date as 1 January 2012"""
    formatted_date = getdate(date).strftime("%d %B %Y")
    return formatted_date.startswith("0") and formatted_date[1:] or formatted_date

def has_common(l1, l2):
    """Returns truthy value if there are common elements in lists l1 and l2"""
    return set(l1) & set(l2)

def flt(s, precision=None):
    """Convert to float (ignoring commas)"""
    if isinstance(s, basestring):
        s = s.replace(',', '')
    try:
        num = float(s)
        if precision is not None:
            num = rounded(num, precision)
    except Exception:
        num = 0
    return num

def cint(s):
    """Convert to integer"""
    try:
        num = int(float(s))
    except:
        num = 0
    return num

def cstr(s):
    if isinstance(s, unicode):
        return s
    elif s == None:
        return ''
    elif isinstance(s, basestring):
        return unicode(s, 'utf-8')
    else:
        return unicode(s)

def rounded(num, precision=0):
    """Rounds halves to the nearest even digit (banker's rounding)"""
    precision = cint(precision)
    multiplier = 10 ** precision

    # avoid rounding errors
    num = round(num * multiplier if precision else num, 8)

    floor = math.floor(num)
    decimal_part = num - floor

    if decimal_part == 0.5:
        num = floor if (floor % 2 == 0) else floor + 1
    else:
        num = round(num)

    return (num / multiplier) if precision else num

def encode(obj, encoding="utf-8"):
    if isinstance(obj, list):
        out = []
        for o in obj:
            if isinstance(o, unicode):
                out.append(o.encode(encoding))
            else:
                out.append(o)
        return out
    elif isinstance(obj, unicode):
        return obj.encode(encoding)
    else:
        return obj

def parse_val(v):
    """Converts to simple datatypes from SQL query results"""
    if isinstance(v, (datetime.date, datetime.datetime)):
        v = unicode(v)
    elif isinstance(v, datetime.timedelta):
        v = ":".join(unicode(v).split(":")[:2])
    elif isinstance(v, long):
        v = int(v)
    return v

def fmt_money(amount, precision=None, currency=None):
    """
    Convert to string with commas for thousands, millions etc
    """
    number_format = None
    if currency:
        number_format = frappe.db.get_value("Currency", currency, "number_format")
    if not number_format:
        number_format = frappe.db.get_default("number_format") or "#,###.##"

    decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)
    if precision is None:
        precision = number_format_precision

    amount = '%.*f' % (precision, flt(amount))
    if amount.find('.') == -1:
        decimals = ''
    else:
        decimals = amount.split('.')[1]

    parts = []
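    # Worked examples of the grouping done below (hand-computed, illustrative):
    #   12345678.9 with number_format "#,###.##"    -> '12,345,678.90'
    #   12345678.9 with number_format "#,##,###.##" -> '1,23,45,678.90'
    # The lakh/crore format "#,##,###.##" keeps the last three digits together
    # and then groups by two, which is why `val` is 2 for that format and 3
    # for every other format.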
minus = ''
    if flt(amount) < 0:
        minus = '-'

    amount = cstr(abs(flt(amount))).split('.')[0]

    if len(amount) > 3:
        parts.append(amount[-3:])
        amount = amount[:-3]

        val = number_format == "#,##,###.##" and 2 or 3

        while len(amount) > val:
            parts.append(amount[-val:])
            amount = amount[:-val]

    parts.append(amount)
    parts.reverse()

    amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
    amount = minus + amount

    if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
        symbol = frappe.db.get_value("Currency", currency, "symbol") or currency
        amount = symbol + " " + amount

    return amount

number_format_info = {
    "#,###.##": (".", ",", 2),
    "#.###,##": (",", ".", 2),
    "# ###.##": (".", " ", 2),
    "# ###,##": (",", " ", 2),
    "#'###.##": (".", "'", 2),
    "#, ###.##": (".", ", ", 2),
    "#,##,###.##": (".", ",", 2),
    "#,###.###": (".", ",", 3),
    "#.###": ("", ".", 0),
    "#,###": ("", ",", 0)
}

def get_number_format_info(format):
    # returns (decimal_str, comma_str, precision)
    return number_format_info.get(format) or (".", ",", 2)

#
# convert currency to words
#
def money_in_words(number, main_currency=None, fraction_currency=None):
    """
    Returns string in words with currency and fraction currency.
    """
    from frappe.utils import get_defaults
    if not number or flt(number) < 0:
        return ""
    d = get_defaults()
    if not main_currency:
        main_currency = d.get('currency', 'INR')
    if not fraction_currency:
        fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction") or "Cent"

    n = "%.2f" % flt(number)
    main, fraction = n.split('.')
    if len(fraction) == 1:
        fraction += '0'

    number_format = frappe.db.get_value("Currency", main_currency, "number_format") or \
        frappe.db.get_default("number_format") or "#,###.##"

    in_million = True
    if number_format == "#,##,###.##":
        in_million = False

    out = main_currency + ' ' + in_words(main, in_million).title()
    if cint(fraction):
        out = out + ' and ' + in_words(fraction, in_million).title() + ' ' + fraction_currency

    return out + ' only.'

#
# convert number to words
#
def in_words(integer, in_million=True):
    """
    Returns string in words for the given integer.
    """
    n = int(integer)
    known = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
        6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten', 11: 'eleven',
        12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen',
        16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen',
        20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty',
        70: 'seventy', 80: 'eighty', 90: 'ninety'}

    # recursive helper; xpsn is the function itself, passed in explicitly
    def psn(n, known, xpsn):
        import sys
        if n in known:
            return known[n]
        bestguess, remainder = str(n), 0
        if n <= 20:
            # every value up to 20 is in `known`; reaching here is a bug
            frappe.errprint(sys.stderr)
            frappe.errprint(n)
            frappe.errprint("How did this happen?")
            assert 0
        elif n < 100:
            bestguess = xpsn((n // 10) * 10, known, xpsn) + '-' + xpsn(n % 10, known, xpsn)
            return bestguess
        elif n < 1000:
            bestguess = xpsn(n // 100, known, xpsn) + ' ' + 'hundred'
            remainder = n % 100
        else:
            if in_million:
                if n < 1000000:
                    bestguess = xpsn(n // 1000, known, xpsn) + ' ' + 'thousand'
                    remainder = n % 1000
                elif n < 1000000000:
                    bestguess = xpsn(n // 1000000, known, xpsn) + ' ' + 'million'
                    remainder = n % 1000000
                else:
                    bestguess = xpsn(n // 1000000000, known, xpsn) + ' ' + 'billion'
                    remainder = n % 1000000000
            else:
                if n < 100000:
                    bestguess = xpsn(n // 1000, known, xpsn) + ' ' + 'thousand'
                    remainder = n % 1000
                elif n < 10000000:
                    bestguess = xpsn(n // 100000, known, xpsn) + ' ' + 'lakh'
                    remainder = n % 100000
                else:
                    bestguess = xpsn(n // 10000000, known, xpsn) + ' ' + 'crore'
                    remainder = n % 10000000
        if remainder:
            if remainder >= 100:
                comma = ','
            else:
                comma = ''
            return bestguess + comma + ' ' + xpsn(remainder, known, xpsn)
        else:
            return bestguess

    return psn(n, known, psn)

def is_html(text):
    out = False
    for key in ["<br>", "<p", "<img", "<div"]:
        if key in text:
            out = True
            break
    return out

_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')

def strip_html(text):
    """removes anything enclosed in and including <>"""
    return _striptags_re.sub("", text)

def escape_html(text):
    html_escape_table = {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&#39;",
        ">": "&gt;",
        "<": "&lt;",
    }
    return "".join(html_escape_table.get(c, c) for c in text)

def pretty_date(iso_datetime):
    """
    Takes an ISO time and returns a string representing how long ago
    the date represents. Ported from PrettyDate by John Resig.
    """
    if not iso_datetime:
        return ''
    import math
    if isinstance(iso_datetime, basestring):
        iso_datetime = datetime.datetime.strptime(iso_datetime, '%Y-%m-%d %H:%M:%S.%f')
    now_dt = datetime.datetime.strptime(now(), '%Y-%m-%d %H:%M:%S.%f')
    dt_diff = now_dt - iso_datetime

    # available only in python 2.7+
    # dt_diff_seconds = dt_diff.total_seconds()
    dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
    dt_diff_days = math.floor(dt_diff_seconds / 86400.0)

    # different cases
    if dt_diff_seconds < 60.0:
        return 'just now'
    elif dt_diff_seconds < 120.0:
        return '1 minute ago'
    elif dt_diff_seconds < 3600.0:
        return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
    elif dt_diff_seconds < 7200.0:
        return '1 hour ago'
    elif dt_diff_seconds < 86400.0:
        return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
    elif dt_diff_days == 1.0:
        return 'Yesterday'
    elif dt_diff_days < 7.0:
        return '%s days ago' % cint(dt_diff_days)
    elif dt_diff_days < 31.0:
        return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
    elif dt_diff_days < 365.0:
        return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
    else:
        return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))

def comma_or(some_list):
    return comma_sep(some_list, " or ")

def comma_and(some_list):
    return comma_sep(some_list, " and ")

def comma_sep(some_list, sep):
    if isinstance(some_list, (list, tuple)):
        # list(some_list) is done to preserve the existing list
        some_list = [unicode(s) for s in list(some_list)]
        if not some_list:
            return ""
        elif len(some_list) == 1:
            return some_list[0]
        else:
            some_list = ["'%s'" % s for s in some_list]
            return ", ".join(some_list[:-1]) + sep + some_list[-1]
    else:
        return some_list

def filter_strip_join(some_list, sep):
    """given a list, filter None values, strip spaces and join"""
    return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))

def get_url(uri=None, full_address=False):
    """get app url from request"""
    host_name = frappe.local.conf.host_name
    if not host_name:
        if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
            protocol = 'https' == frappe.get_request_header('X-Forwarded-Proto', "") and 'https://' or 'http://'
            host_name = protocol + frappe.local.request.host
        elif frappe.local.site:
            host_name = "http://{}".format(frappe.local.site)
        else:
            host_name = frappe.db.get_value("Website Settings", "Website Settings", "subdomain")
            if host_name and "http" not in host_name:
                host_name = "http://" + host_name
            if not host_name:
                host_name = "http://localhost"
    if not uri and full_address:
        uri = frappe.get_request_header("REQUEST_URI", "")
    url = urllib.basejoin(host_name, uri) if uri else host_name
    return url

def get_url_to_form(doctype, name, label=None):
    if not label:
        label = name
    return """<a href="/desk#Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals()

operator_map = {
    # startswith
    "^": lambda (a, b): (a or "").startswith(b),

    # in or not in a list
    "in": lambda (a, b): operator.contains(b, a),
    "not in": lambda (a, b): not operator.contains(b, a),

    # comparison operators
    "=": lambda (a, b): operator.eq(a, b),
    "!=": lambda (a, b): operator.ne(a, b),
    ">": lambda (a, b):
operator.gt(a, b), "<": lambda (a, b): operator.lt(a, b), ">=": lambda (a, b): operator.ge(a, b), "<=": lambda (a, b): operator.le(a, b), "not None": lambda (a, b): a and True or False, "None": lambda (a, b): (not a) and True or False } def compare(val1, condition, val2): ret = False if condition in operator_map: ret = operator_map[condition]((val1, val2)) return ret def scrub_urls(html): html = expand_relative_urls(html) html = quote_urls(html) return html def expand_relative_urls(html): # expand relative urls url = get_url() if url.endswith("/"): url = url[:-1] def _expand_relative_urls(match): to_expand = list(match.groups()) if not to_expand[2].startswith("/"): to_expand[2] = "/" + to_expand[2] to_expand.insert(2, url) return "".join(to_expand) return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html) def quote_urls(html): def _quote_url(match): groups = list(match.groups()) groups[2] = urllib.quote(groups[2].encode("utf-8"), safe=b"~@#$&()*!+=:;,.?/'").decode("utf-8") return "".join(groups) return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)', _quote_url, html) def unique(seq): """use this instead of list(set()) to preserve order of the original list. Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order""" seen = set() seen_add = seen.add return [ x for x in seq if not (x in seen or seen_add(x)) ] """ Base classes and utility functions for Data API service classes. """ __author__ = 'Dan Gunter ' __date__ = '12/24/15' # Imports # ------- # Stdlib import functools import logging import os import signal import time import traceback # Third party import twisted.internet import twisted.web from thrift.transport import THttpClient from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol from thrift.transport import TTwisted # Local from doekbase.data_api import exceptions, util # Global constants and variables # ------------------------------ DEFAULT_WS_URL = 'https://ci.kbase.us/services/ws/' DEFAULT_SHOCK_URL = 'https://ci.kbase.us/services/shock-api/' SERVICES_DICT = {'workspace_service_url': DEFAULT_WS_URL, 'shock_service_url' : DEFAULT_SHOCK_URL} # Functions and classes # --------------------- def server_method(func): """Decorator for service methods. The wrapper depends on the existence of two attributes in the class being wrapped: 1. ttypes (module): Thrift type module, containing exception classes 2. 
log (logging.Logger): Logger instance Args: func (function): Function being wrapped """ def wrapper(self, token, ref, *args, **kwargs): assert hasattr(self, 'log'), 'Method in wrapped class must have "log" ' \ 'attribute' assert hasattr(self, 'ttypes'), 'Method in wrapped class must have ' \ '"ttypes" attribute' error, result = None, None #self.log.debug('method={meth} state=begin token={tok} ref={ref} args={' self.log.debug('method={meth} state=begin ref={ref} args={' 'args} kwargs={kw}' .format(meth=func.__name__, tok=token, ref=ref, args=args, kw=kwargs)) t0 = time.time() try: result = func(self, token, ref, *args, **kwargs) except AttributeError, e: error = e raise self.ttypes.AttributeException(str(e.message), traceback.format_exc()) except exceptions.AuthenticationError, e: error = e raise self.ttypes.AuthenticationException(str(e.message), traceback.format_exc()) except exceptions.AuthorizationError, e: error = e raise self.ttypes.AuthorizationException(str(e.message), traceback.format_exc()) except TypeError, e: error = e raise self.ttypes.TypeException(str(e.message), traceback.format_exc()) except Exception, e: error = e raise self.ttypes.ServiceException(str(e.message), traceback.format_exc(), {"ref": str(ref)}) finally: if error is None: #self.log.debug('method={meth} state=end token={tok} ref={ref} ' self.log.debug('method={meth} state=end ref={ref} ' 'args={args} kwargs={kw} dur={t:.3f}' .format(meth=func.__name__, tok=token, ref=ref, args=args, kw=kwargs, t=time.time() - t0)) else: #self.log.error('method={meth} state=error token={tok} ' self.log.error('method={meth} state=error ' 'ref={ref} args={args} kwargs={kw}' 'error_message="{m}" dur={t:.3f}' .format(meth=func.__name__, tok=token, ref=ref, args=args, kw=kwargs, m=str(error), t=time.time() - t0)) return result return wrapper class BaseService(object): """Base class for Data API service classes, which will be defined in the 'interface' module of the appropriate API subdirectory. Takes care of some boilerplate logging and error-checking, as well as setting up instance variables for the @server_method decorator. """ def __init__(self, log, ttypes_module, api_class, services=None): """Constructor. Args: log (logging.Logger): For logging service activity ttypes_module: Thrift ttypes module for the API api_class: the API library class, e.g., `doekbase.data_api.taxonomy.taxon.api.TaxonAPI` services (dict): Service configuration dictionary, passed to constructor of the `api_class`. """ self.log = log self.ttypes = ttypes_module self._api_class = api_class self.log.debug('method=__init__ state=begin services={s}' .format(s=services)) try: if services is None or not isinstance(services, dict): raise TypeError("You must provide a service configuration " + "dictionary! Found {0}".format(type(services))) elif not services.has_key("workspace_service_url"): raise KeyError("Expecting workspace_service_url key!") except Exception as e: self.log.error('method=__init__ state=error services={s}' 'error_message="{m}"' .format(s=services, m=e.message)) raise self.services = services self.log.debug('method=__init__ state=end services={s} ' .format(s=services)) def _get_instance(self, *args): """Return an instance of the API. Use this level of indirection to allow future optimizations over creating it each time. """ return self._api_class(self.services, *args) class BaseClientConnection(object): """Base class for ClientConnection objects defined in the data_api..service.interface module. 
""" def __init__(self, thrift_client, url): if not hasattr(thrift_client, 'Client') or not callable( thrift_client.Client): raise AttributeError('Invalid "thrift_client" argument') self.client = None self.transport = None self.protocol = None try: self.transport = THttpClient.THttpClient(url) self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport) self.client = thrift_client.Client(self.protocol) except AssertionError: raise ValueError('Invalid Thrift client URL: "{}"'.format(url)) except TTransport.TTransportException as err: raise RuntimeError( 'Cannot connect to remote Thrift service at {}: {}' .format(url, err.message)) def get_client(self): return self.transport, self.client # For service drivers def start_service(api_class, service_class, log, services=None, host='localhost', port=9100, killprocgrp=False): """Start a Data API service. Args: api_class (BaseService): The custom API service class, e.g., `doekbase.data_api.taxonomy.taxon.api.TaxonService` service_class (type): The Thrift auto-generated service class log (logging.Logger): Logging object services (dict): Service configuration dictionary, passed to constructor of the `api_class`. host (str): Service host (will default to 'localhost') port (int): Service port, e.g. 9101 killprocgrp (bool): if True, kill process group on exit """ assert issubclass(api_class, BaseService), \ 'Invalid "api_class": must be a subclass of ' \ 'doekbase.data_api.service_core.BaseService' assert hasattr(service_class, 'Processor'), 'Invalid "service_class": ' \ 'missing "Processor" attribute' assert isinstance(port, int), 'The "port" must be an integer' svc_t0 = util.log_start(log, 'start_service', kvp=dict(host=host, port=port)) # Create server services = services or SERVICES_DICT handler = api_class(services) processor = service_class.Processor(handler) pfactory = TBinaryProtocol.TBinaryProtocolFactory() resource = TTwisted.ThriftResource(processor, pfactory, pfactory) site = twisted.web.server.Site(resource=resource) twisted.internet.reactor.listenTCP(port, site, interface=host) # Kill entire process group on shutdown if killprocgrp: twisted.internet.reactor.addSystemEventTrigger('before', 'shutdown', functools.partial( kill_process_group, log=log)) # Run server sname = api_class.__name__ shost = host or 'localhost' util.log_start(log, 'server', kvp=dict(name=sname, host=shost, port=port)) t0 = util.log_start(log, 'twisted.internet.reactor.run', level=logging.DEBUG) try: twisted.internet.reactor.run() except Exception as err: log.error('msg="Abort {} server on error"'.format(sname)) util.log_end(log, t0, 'twisted.internet.reactor.run', status_code=1, level=logging.ERROR, kvp=dict(msg=err)) raise finally: util.log_end(log, t0, 'twisted.internet.reactor.run') util.log_end(log, svc_t0, 'start_service', kvp=dict(host=host, port=port)) return 0 def stop_service(): twisted.internet.reactor.stop() def kill_process_group(log): """Kill entire process group on Twisted shutdown. 
Args: log (logging.Logger): Logger """ pid = os.getpid() # my pid grpid = -os.getpgid(pid) # my process group signo = signal.SIGINT # the signal to send t = util.log_start(log, 'kill_process_group', level=logging.WARN, kvp=dict(pid=pid, group_pid=grpid, signal=signo)) # ignore signal in this process (Twisted is already shutting down) signal.signal(signo, signal.SIG_IGN) # send the signal to my process group os.kill(grpid, signo) util.log_end(log, t, 'kill_process_group', level=logging.WARN, kvp=dict(pid=pid, group_pid=grpid, signal=signo)) CHAR_MAX = 127 CHAR_MIN = -128 DBL_MAX = 1.7976931348623157e+308 DBL_MIN = 2.2250738585072014e-308 FLT_MAX = 3.4028234663852886e+38 FLT_MIN = 1.1754943508222875e-38 INT_MAX = 2147483647 INT_MIN = -2147483648 LLONG_MAX = 9223372036854775807 LLONG_MIN = -9223372036854775808 LONG_MAX = 2147483647 LONG_MIN = -2147483648 PY_SSIZE_T_MAX = 2147483647 PY_SSIZE_T_MIN = -2147483648 SHRT_MAX = 32767 SHRT_MIN = -32768 SIZEOF_PYGC_HEAD = 16 UCHAR_MAX = 255 UINT_MAX = 4294967295 ULLONG_MAX = 18446744073709551615 ULONG_MAX = 4294967295 USHRT_MAX = 65535 __loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>" def _pending_threadfunc(*args,**kw): pass class _test_structmembersType(object): pass def _test_thread_state(*args,**kw): pass def argparsing(*args,**kw): pass def code_newempty(*args,**kw): pass def codec_incrementaldecoder(*args,**kw): pass def codec_incrementalencoder(*args,**kw): pass def crash_no_current_thread(*args,**kw): pass class error(Exception): pass def exception_print(*args,**kw): pass def getargs_B(*args,**kw): pass def getargs_H(*args,**kw): pass def getargs_I(*args,**kw): pass def getargs_K(*args,**kw): pass def getargs_L(*args,**kw): pass def getargs_Z(*args,**kw): pass def getargs_Z_hash(*args,**kw): pass def getargs_b(*args,**kw): pass def getargs_c(*args,**kw): pass def getargs_h(*args,**kw): pass def getargs_i(*args,**kw): pass def getargs_k(*args,**kw): pass def getargs_keyword_only(*args,**kw): pass def getargs_keywords(*args,**kw): pass def getargs_l(*args,**kw): pass def getargs_n(*args,**kw): pass def getargs_p(*args,**kw): pass def getargs_s(*args,**kw): pass def getargs_s_hash(*args,**kw): pass def getargs_s_star(*args,**kw): pass def getargs_tuple(*args,**kw): pass def getargs_u(*args,**kw): pass def getargs_u_hash(*args,**kw): pass def getargs_w_star(*args,**kw): pass def getargs_y(*args,**kw): pass def getargs_y_hash(*args,**kw): pass def getargs_y_star(*args,**kw): pass def getargs_z(*args,**kw): pass def getargs_z_hash(*args,**kw): pass def getargs_z_star(*args,**kw): pass class instancemethod(object): pass def make_exception_with_doc(*args,**kw): pass def make_memoryview_from_NULL_pointer(*args,**kw): pass def parse_tuple_and_keywords(*args,**kw): pass def pytime_object_to_time_t(*args,**kw): pass def pytime_object_to_timespec(*args,**kw): pass def pytime_object_to_timeval(*args,**kw): pass def raise_exception(*args,**kw): pass def raise_memoryerror(*args,**kw): pass def run_in_subinterp(*args,**kw): pass def set_exc_info(*args,**kw): pass def test_L_code(*args,**kw): pass def test_Z_code(*args,**kw): pass def test_capsule(*args,**kw): pass def test_config(*args,**kw): pass def test_datetime_capi(*args,**kw): pass def test_dict_iteration(*args,**kw): pass def test_empty_argparse(*args,**kw): pass def test_k_code(*args,**kw): pass def test_lazy_hash_inheritance(*args,**kw): pass def test_list_api(*args,**kw): pass def test_long_and_overflow(*args,**kw): pass def test_long_api(*args,**kw): pass def 
test_long_as_double(*args,**kw): pass def test_long_as_size_t(*args,**kw): pass def test_long_long_and_overflow(*args,**kw): pass def test_long_numbits(*args,**kw): pass def test_longlong_api(*args,**kw): pass def test_null_strings(*args,**kw): pass def test_s_code(*args,**kw): pass def test_string_from_format(*args,**kw): pass def test_string_to_double(*args,**kw): pass def test_u_code(*args,**kw): pass def test_unicode_compare_with_ascii(*args,**kw): pass def test_widechar(*args,**kw): pass def test_with_docstring(*args,**kw): """This is a pretty normal docstring.""" pass def traceback_print(*args,**kw): pass def unicode_aswidechar(*args,**kw): pass def unicode_aswidecharstring(*args,**kw): pass def unicode_encodedecimal(*args,**kw): pass def unicode_transformdecimaltoascii(*args,**kw): pass import wx from Tree import Tree from wx.lib.customtreectrl import wxEVT_TREE_ITEM_ACTIVATED class MNotExpandOnDClick(object): def __init__(self): self.Bind(wx.EVT_LEFT_DCLICK, self.__on_left_dclick) def __on_left_dclick(self, event): itemId, flags = self.HitTest(event.GetPosition()) if flags & (wx.TREE_HITTEST_ONITEMLABEL | wx.TREE_HITTEST_ONITEMICON): self.AddPendingEvent(wx.TreeEvent(wxEVT_TREE_ITEM_ACTIVATED, self, self.GetSelection())) else: event.Skip() """ def test(): from TreeNode import TreeNode class MyTreeNode(TreeNode): def __init__(self, id): TreeNode.__init__(self) self.id = id def getId(self): return self.id def children(self): return (MyTreeNode(self.id*10+1), MyTreeNode(self.id*10+2), MyTreeNode(self.id*10+3)) def getText(self): return str(self.id) class MyTree(Tree, MNotExpandOnDClick): def __init__(self, *p, **pp): Tree.__init__(self, *p, **pp) MNotExpandOnDClick.__init__(self) self.Bind(wx.EVT_TREE_SEL_CHANGED, self.__onSelectionChanged) def __onSelectionChanged(self, event): print "SEL CHANGED:", self.getNodeForEvent(event).getId() def roots(self): return (MyTreeNode(1), MyTreeNode(2), MyTreeNode(3)) def oninit(self): self.t = MyTree(self) self.t.refresh() def ondestroy(self): pass def ontimer(self): #self.t.selectIdPath([1, 12]) pass from toolib.wx.TestApp import TestApp TestApp(oninit, ondestroy, ontimer=ontimer).MainLoop() if __name__ == '__main__': test() """ # $Id: pjsua_app.py 4724 2014-01-31 08:52:09Z nanang $ # # Sample and simple Python script to make and receive calls, and do # presence and instant messaging/IM using PJSUA-API binding for Python. # # Copyright (C) 2003-2007 Benny Prijono # import py_pjsua import sys import thread # # Configurations # THIS_FILE = "pjsua_app.py" C_QUIT = 0 C_LOG_LEVEL = 4 # STUN config. # Set C_STUN_HOST to the address:port of the STUN server to enable STUN # C_STUN_HOST = "" #C_STUN_HOST = "192.168.0.2" #C_STUN_HOST = "stun.iptel.org:3478" # SIP port C_SIP_PORT = 5060 # Globals # g_ua_cfg = None g_acc_id = py_pjsua.PJSUA_INVALID_ID g_current_call = py_pjsua.PJSUA_INVALID_ID g_wav_files = [] g_wav_id = 0 g_wav_port = 0 g_rec_file = "" g_rec_id = 0 g_rec_port = 0 # Utility: display PJ error and exit # def err_exit(title, rc): py_pjsua.perror(THIS_FILE, title, rc) py_pjsua.destroy() exit(1) # Logging function (also callback, called by pjsua-lib) # def log_cb(level, str, len): if level <= C_LOG_LEVEL: print str, def write_log(level, str): log_cb(level, str + "\n", 0) # Utility to get call info # def call_name(call_id): ci = py_pjsua.call_get_info(call_id) return "[Call " + `call_id` + " " + ci.remote_info + "]" # Callback when call state has changed. 
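# NOTE: call_name() above uses Python 2's backtick shorthand for repr(),
# which was removed in Python 3. A version-neutral spelling of the same
# helper is sketched here; the name call_name_compat is hypothetical and
# only avoids shadowing the original:
def call_name_compat(call_id):
    # identical to call_name(), but with %r instead of backtick-repr
    ci = py_pjsua.call_get_info(call_id)
    return "[Call %r %s]" % (call_id, ci.remote_info)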
# def on_call_state(call_id, e): global g_current_call ci = py_pjsua.call_get_info(call_id) write_log(3, call_name(call_id) + " state = " + `ci.state_text`) if ci.state == py_pjsua.PJSIP_INV_STATE_DISCONNECTED: g_current_call = py_pjsua.PJSUA_INVALID_ID # Callback for incoming call # def on_incoming_call(acc_id, call_id, rdata): global g_current_call if g_current_call != py_pjsua.PJSUA_INVALID_ID: # There's call in progress - answer Busy py_pjsua.call_answer(call_id, 486, None, None) return g_current_call = call_id ci = py_pjsua.call_get_info(call_id) write_log(3, "*** Incoming call: " + call_name(call_id) + "***") write_log(3, "*** Press a to answer or h to hangup ***") # Callback when media state has changed (e.g. established or terminated) # def on_call_media_state(call_id): ci = py_pjsua.call_get_info(call_id) if ci.media_status == py_pjsua.PJSUA_CALL_MEDIA_ACTIVE: py_pjsua.conf_connect(ci.conf_slot, 0) py_pjsua.conf_connect(0, ci.conf_slot) write_log(3, call_name(call_id) + ": media is active") else: write_log(3, call_name(call_id) + ": media is inactive") # Callback when account registration state has changed # def on_reg_state(acc_id): acc_info = py_pjsua.acc_get_info(acc_id) if acc_info.has_registration != 0: cmd = "registration" else: cmd = "unregistration" if acc_info.status != 0 and acc_info.status != 200: write_log(3, "Account " + cmd + " failed: rc=" + `acc_info.status` + " " + acc_info.status_text) else: write_log(3, "Account " + cmd + " success") # Callback when buddy's presence state has changed # def on_buddy_state(buddy_id): write_log(3, "On Buddy state called") buddy_info = py_pjsua.buddy_get_info(buddy_id) if buddy_info.status != 0 and buddy_info.status != 200: write_log(3, "Status of " + `buddy_info.uri` + " is " + `buddy_info.status_text`) else: write_log(3, "Status : " + `buddy_info.status`) # Callback on incoming pager (MESSAGE) # def on_pager(call_id, strfrom, strto, contact, mime_type, text): write_log(3, "MESSAGE from " + `strfrom` + " : " + `text`) # Callback on the delivery status of outgoing pager (MESSAGE) # def on_pager_status(call_id, strto, body, user_data, status, reason): write_log(3, "MESSAGE to " + `strto` + " status " + `status` + " reason " + `reason`) # Received typing indication # def on_typing(call_id, strfrom, to, contact, is_typing): str_t = "" if is_typing: str_t = "is typing.." else: str_t = "has stopped typing" write_log(3, "IM indication: " + strfrom + " " + str_t) # Received the status of previous call transfer request # def on_call_transfer_status(call_id,status_code,status_text,final,p_cont): strfinal = "" if final == 1: strfinal = "[final]" write_log(3, "Call " + `call_id` + ": transfer status= " + `status_code` + " " + status_text+ " " + strfinal) if status_code/100 == 2: write_log(3, "Call " + `call_id` + " : call transferred successfully, disconnecting call") status = py_pjsua.call_hangup(call_id, 410, None, None) p_cont = 0 # Callback on incoming call transfer request # def on_call_transfer_request(call_id, dst, code): write_log(3, "Call transfer request from " + `call_id` + " to " + dst + " with code " + `code`) # # Initialize pjsua. 
# def app_init(): global g_acc_id, g_ua_cfg # Create pjsua before anything else status = py_pjsua.create() if status != 0: err_exit("pjsua create() error", status) # Create and initialize logging config log_cfg = py_pjsua.logging_config_default() log_cfg.level = C_LOG_LEVEL log_cfg.cb = log_cb # Create and initialize pjsua config # Note: for this Python module, thread_cnt must be 0 since Python # doesn't like to be called from alien thread (pjsua's thread # in this case) ua_cfg = py_pjsua.config_default() ua_cfg.thread_cnt = 0 ua_cfg.user_agent = "PJSUA/Python 0.1" ua_cfg.cb.on_incoming_call = on_incoming_call ua_cfg.cb.on_call_media_state = on_call_media_state ua_cfg.cb.on_reg_state = on_reg_state ua_cfg.cb.on_call_state = on_call_state ua_cfg.cb.on_buddy_state = on_buddy_state ua_cfg.cb.on_pager = on_pager ua_cfg.cb.on_pager_status = on_pager_status ua_cfg.cb.on_typing = on_typing ua_cfg.cb.on_call_transfer_status = on_call_transfer_status ua_cfg.cb.on_call_transfer_request = on_call_transfer_request # Configure STUN setting if C_STUN_HOST != "": ua_cfg.stun_host = C_STUN_HOST; # Create and initialize media config med_cfg = py_pjsua.media_config_default() med_cfg.ec_tail_len = 0 # # Initialize pjsua!! # status = py_pjsua.init(ua_cfg, log_cfg, med_cfg) if status != 0: err_exit("pjsua init() error", status) # Configure UDP transport config transport_cfg = py_pjsua.transport_config_default() transport_cfg.port = C_SIP_PORT # Create UDP transport status, transport_id = \ py_pjsua.transport_create(py_pjsua.PJSIP_TRANSPORT_UDP, transport_cfg) if status != 0: err_exit("Error creating UDP transport", status) # Create initial default account status, acc_id = py_pjsua.acc_add_local(transport_id, 1) if status != 0: err_exit("Error creating account", status) g_acc_id = acc_id g_ua_cfg = ua_cfg # Add SIP account interractively # def add_account(): global g_acc_id acc_domain = "" acc_username = "" acc_passwd ="" confirm = "" # Input account configs print "Your SIP domain (e.g. myprovider.com): ", acc_domain = sys.stdin.readline() if acc_domain == "\n": return acc_domain = acc_domain.replace("\n", "") print "Your username (e.g. alice): ", acc_username = sys.stdin.readline() if acc_username == "\n": return acc_username = acc_username.replace("\n", "") print "Your password (e.g. secret): ", acc_passwd = sys.stdin.readline() if acc_passwd == "\n": return acc_passwd = acc_passwd.replace("\n", "") # Configure account configuration acc_cfg = py_pjsua.acc_config_default() acc_cfg.id = "sip:" + acc_username + "@" + acc_domain acc_cfg.reg_uri = "sip:" + acc_domain cred_info = py_pjsua.Pjsip_Cred_Info() cred_info.realm = "*" cred_info.scheme = "digest" cred_info.username = acc_username cred_info.data_type = 0 cred_info.data = acc_passwd acc_cfg.cred_info.append(1) acc_cfg.cred_info[0] = cred_info # Add new SIP account status, acc_id = py_pjsua.acc_add(acc_cfg, 1) if status != 0: py_pjsua.perror(THIS_FILE, "Error adding SIP account", status) else: g_acc_id = acc_id write_log(3, "Account " + acc_cfg.id + " added") def add_player(): global g_wav_files global g_wav_id global g_wav_port file_name = "" status = -1 wav_id = 0 print "Enter the path of the file player(e.g. 
/tmp/audio.wav): ", file_name = sys.stdin.readline() if file_name == "\n": return file_name = file_name.replace("\n", "") status, wav_id = py_pjsua.player_create(file_name, 0) if status != 0: py_pjsua.perror(THIS_FILE, "Error adding file player ", status) else: g_wav_files.append(file_name) if g_wav_id == 0: g_wav_id = wav_id g_wav_port = py_pjsua.player_get_conf_port(wav_id) write_log(3, "File player " + file_name + " added") def add_recorder(): global g_rec_file global g_rec_id global g_rec_port file_name = "" status = -1 rec_id = 0 print "Enter the path of the file recorder(e.g. /tmp/audio.wav): ", file_name = sys.stdin.readline() if file_name == "\n": return file_name = file_name.replace("\n", "") status, rec_id = py_pjsua.recorder_create(file_name, 0, None, 0, 0) if status != 0: py_pjsua.perror(THIS_FILE, "Error adding file recorder ", status) else: g_rec_file = file_name g_rec_id = rec_id g_rec_port = py_pjsua.recorder_get_conf_port(rec_id) write_log(3, "File recorder " + file_name + " added") def conf_list(): ports = None print "Conference ports : " ports = py_pjsua.enum_conf_ports() for port in ports: info = None info = py_pjsua.conf_get_port_info(port) txlist = "" for listener in info.listeners: txlist = txlist + "#" + `listener` + " " print "Port #" + `info.slot_id` + "[" + `(info.clock_rate/1000)` + "KHz/" + `(info.samples_per_frame * 1000 / info.clock_rate)` + "ms] " + info.name + " transmitting to: " + txlist def connect_port(): src_port = 0 dst_port = 0 print "Connect src port # (empty to cancel): " src_port = sys.stdin.readline() if src_port == "\n": return src_port = src_port.replace("\n", "") src_port = int(src_port) print "To dst port # (empty to cancel): " dst_port = sys.stdin.readline() if dst_port == "\n": return dst_port = dst_port.replace("\n", "") dst_port = int(dst_port) status = py_pjsua.conf_connect(src_port, dst_port) if status != 0: py_pjsua.perror(THIS_FILE, "Error connecting port ", status) else: write_log(3, "Port connected from " + `src_port` + " to " + `dst_port`) def disconnect_port(): src_port = 0 dst_port = 0 print "Disconnect src port # (empty to cancel): " src_port = sys.stdin.readline() if src_port == "\n": return src_port = src_port.replace("\n", "") src_port = int(src_port) print "From dst port # (empty to cancel): " dst_port = sys.stdin.readline() if dst_port == "\n": return dst_port = dst_port.replace("\n", "") dst_port = int(dst_port) status = py_pjsua.conf_disconnect(src_port, dst_port) if status != 0: py_pjsua.perror(THIS_FILE, "Error disconnecting port ", status) else: write_log(3, "Port disconnected " + `src_port` + " from " + `dst_port`) def dump_call_quality(): global g_current_call buf = "" if g_current_call != -1: buf = py_pjsua.call_dump(g_current_call, 1, 1024, " ") write_log(3, "\n" + buf) else: write_log(3, "No current call") def xfer_call(): global g_current_call if g_current_call == -1: write_log(3, "No current call") else: call = g_current_call ci = py_pjsua.call_get_info(g_current_call) print "Transferring current call ["+ `g_current_call` + "] " + ci.remote_info print "Enter sip url : " url = sys.stdin.readline() if url == "\n": return url = url.replace("\n", "") if call != g_current_call: print "Call has been disconnected" return msg_data = py_pjsua.msg_data_init() status = py_pjsua.call_xfer(g_current_call, url, msg_data); if status != 0: py_pjsua.perror(THIS_FILE, "Error transferring call ", status) else: write_log(3, "Call transferred to " + url) def xfer_call_replaces(): if g_current_call == -1: write_log(3, "No current 
call") else: call = g_current_call ids = py_pjsua.enum_calls() if len(ids) <= 1: print "There are no other calls" return ci = py_pjsua.call_get_info(g_current_call) print "Transfer call [" + `g_current_call` + "] " + ci.remote_info + " to one of the following:" for i in range(0, len(ids)): if ids[i] == call: continue call_info = py_pjsua.call_get_info(ids[i]) print `ids[i]` + " " + call_info.remote_info + " [" + call_info.state_text + "]" print "Enter call number to be replaced : " buf = sys.stdin.readline() buf = buf.replace("\n","") if buf == "": return dst_call = int(buf) if call != g_current_call: print "Call has been disconnected" return if dst_call == call: print "Destination call number must not be the same as the call being transferred" return if dst_call >= py_pjsua.PJSUA_MAX_CALLS: print "Invalid destination call number" return if py_pjsua.call_is_active(dst_call) == 0: print "Invalid destination call number" return py_pjsua.call_xfer_replaces(call, dst_call, 0, None) # # Worker thread function. # Python doesn't like it when it's called from an alien thread # (pjsua's worker thread, in this case), so for Python we must # disable worker thread in pjsua and poll pjsua from Python instead. # def worker_thread_main(arg): global C_QUIT thread_desc = 0; status = py_pjsua.thread_register("python worker", thread_desc) if status != 0: py_pjsua.perror(THIS_FILE, "Error registering thread", status) else: while C_QUIT == 0: py_pjsua.handle_events(50) print "Worker thread quitting.." C_QUIT = 2 # Start pjsua # def app_start(): # Done with initialization, start pjsua!! # status = py_pjsua.start() if status != 0: err_exit("Error starting pjsua!", status) # Start worker thread thr = thread.start_new(worker_thread_main, (0,)) print "PJSUA Started!!" # Print account and buddy list def print_acc_buddy_list(): global g_acc_id acc_ids = py_pjsua.enum_accs() print "Account list:" for acc_id in acc_ids: acc_info = py_pjsua.acc_get_info(acc_id) if acc_info.has_registration == 0: acc_status = acc_info.status_text else: acc_status = `acc_info.status` + "/" + acc_info.status_text + " (expires=" + `acc_info.expires` + ")" if acc_id == g_acc_id: print " *", else: print " ", print "[" + `acc_id` + "] " + acc_info.acc_uri + ": " + acc_status print " Presence status: ", if acc_info.online_status != 0: print "Online" else: print "Invisible" if py_pjsua.get_buddy_count() > 0: print "" print "Buddy list:" buddy_ids = py_pjsua.enum_buddies() for buddy_id in buddy_ids: bi = py_pjsua.buddy_get_info(buddy_id) print " [" + `buddy_id` + "] " + bi.status_text + " " + bi.uri # Print application menu # def print_menu(): print "" print ">>>" print_acc_buddy_list() print """ +============================================================================+ | Call Commands : | Buddy, IM & Presence: | Account: | | | | | | m Make call | +b Add buddy | +a Add account | | a Answer current call | -b Delete buddy | -a Delete accnt | | h Hangup current call | | | | H Hold call | i Send instant message | rr register | | v re-inVite (release Hold) | s Subscribe presence | ru Unregister | | # Send DTMF string | u Unsubscribe presence | | | dq Dump curr. 
call quality | t ToGgle Online status | | | +--------------------------+------------------+ | x Xfer call | Media Commands: | Status: | | X Xfer with Replaces | | | | | cl List ports | d Dump status | | | cc Connect port | dd Dump detail | | | cd Disconnect port | | | | +p Add file player | | |------------------------------+ +r Add file recorder | | | q Quit application | | | +============================================================================+""" print "You have " + `py_pjsua.call_get_count()` + " active call(s)" print ">>>", # Menu # def app_menu(): global g_acc_id global g_current_call quit = 0 while quit == 0: print_menu() choice = sys.stdin.readline() if choice[0] == "q": quit = 1 elif choice[0] == "i": # Sending IM print "Send IM to SIP URL: ", url = sys.stdin.readline() if url == "\n": continue # Send typing indication py_pjsua.im_typing(g_acc_id, url, 1, None) print "The content: ", message = sys.stdin.readline() if message == "\n": py_pjsua.im_typing(g_acc_id, url, 0, None) continue # Send the IM! py_pjsua.im_send(g_acc_id, url, None, message, None, 0) elif choice[0] == "m": # Make call print "Using account ", g_acc_id print "Make call to SIP URL: ", url = sys.stdin.readline() url = url.replace("\n", "") if url == "": continue # Initiate the call! status, call_id = py_pjsua.call_make_call(g_acc_id, url, 0, 0, None) if status != 0: py_pjsua.perror(THIS_FILE, "Error making call", status) else: g_current_call = call_id elif choice[0] == "+" and choice[1] == "b": # Add new buddy bc = py_pjsua.Buddy_Config() print "Buddy URL: ", bc.uri = sys.stdin.readline() if bc.uri == "\n": continue bc.uri = bc.uri.replace("\n", "") bc.subscribe = 1 status, buddy_id = py_pjsua.buddy_add(bc) if status != 0: py_pjsua.perror(THIS_FILE, "Error adding buddy", status) elif choice[0] == "-" and choice[1] == "b": print "Enter buddy ID to delete : " buf = sys.stdin.readline() buf = buf.replace("\n","") if buf == "": continue i = int(buf) if py_pjsua.buddy_is_valid(i) == 0: print "Invalid buddy id " + `i` else: py_pjsua.buddy_del(i) print "Buddy " + `i` + " deleted" elif choice[0] == "+" and choice[1] == "a": # Add account add_account() elif choice[0] == "-" and choice[1] == "a": print "Enter account ID to delete : " buf = sys.stdin.readline() buf = buf.replace("\n","") if buf == "": continue i = int(buf) if py_pjsua.acc_is_valid(i) == 0: print "Invalid account id " + `i` else: py_pjsua.acc_del(i) print "Account " + `i` + " deleted" elif choice[0] == "+" and choice[1] == "p": add_player() elif choice[0] == "+" and choice[1] == "r": add_recorder() elif choice[0] == "c" and choice[1] == "l": conf_list() elif choice[0] == "c" and choice[1] == "c": connect_port() elif choice[0] == "c" and choice[1] == "d": disconnect_port() elif choice[0] == "d" and choice[1] == "q": dump_call_quality() elif choice[0] == "x": xfer_call() elif choice[0] == "X": xfer_call_replaces() elif choice[0] == "h": if g_current_call != py_pjsua.PJSUA_INVALID_ID: py_pjsua.call_hangup(g_current_call, 603, None, None) else: print "No current call" elif choice[0] == "H": if g_current_call != py_pjsua.PJSUA_INVALID_ID: py_pjsua.call_set_hold(g_current_call, None) else: print "No current call" elif choice[0] == "v": if g_current_call != py_pjsua.PJSUA_INVALID_ID: py_pjsua.call_reinvite(g_current_call, 1, None); else: print "No current call" elif choice[0] == "#": if g_current_call == py_pjsua.PJSUA_INVALID_ID: print "No current call" elif py_pjsua.call_has_media(g_current_call) == 0: print "Media is not established yet!" 
else: call = g_current_call print "DTMF strings to send (0-9*#A-B)" buf = sys.stdin.readline() buf = buf.replace("\n", "") if buf == "": continue if call != g_current_call: print "Call has been disconnected" continue status = py_pjsua.call_dial_dtmf(g_current_call, buf) if status != 0: py_pjsua.perror(THIS_FILE, "Unable to send DTMF", status); else: print "DTMF digits enqueued for transmission" elif choice[0] == "s": print "Subscribe presence of (buddy id) : " buf = sys.stdin.readline() buf = buf.replace("\n","") if buf == "": continue i = int(buf) py_pjsua.buddy_subscribe_pres(i, 1) elif choice[0] == "u": print "Unsubscribe presence of (buddy id) : " buf = sys.stdin.readline() buf = buf.replace("\n","") if buf == "": continue i = int(buf) py_pjsua.buddy_subscribe_pres(i, 0) elif choice[0] == "t": acc_info = py_pjsua.acc_get_info(g_acc_id) if acc_info.online_status == 0: acc_info.online_status = 1 else: acc_info.online_status = 0 py_pjsua.acc_set_online_status(g_acc_id, acc_info.online_status) st = "" if acc_info.online_status == 0: st = "offline" else: st = "online" print "Setting " + acc_info.acc_uri + " online status to " + st elif choice[0] == "r": if choice[1] == "r": py_pjsua.acc_set_registration(g_acc_id, 1) elif choice[1] == "u": py_pjsua.acc_set_registration(g_acc_id, 0) elif choice[0] == "d": py_pjsua.dump(choice[1] == "d") elif choice[0] == "a": if g_current_call != py_pjsua.PJSUA_INVALID_ID: py_pjsua.call_answer(g_current_call, 200, None, None) else: print "No current call" # # main # app_init() app_start() app_menu() # # Done, quitting.. # print "PJSUA shutting down.." C_QUIT = 1 # Give the worker thread chance to quit itself while C_QUIT != 2: py_pjsua.handle_events(50) print "PJSUA destroying.." py_pjsua.destroy() #General Function: def combine_funcs(*funcs): def combined_func(*args, **kwargs): for f in funcs: f(*args, **kwargs) return combined_func #Defining the create function: def create(): def sub_create(): def sub1(): def sub2(): f1.destroy() b=TextArea.get("1.0",'end') if(b!='0'): f.write(b) f.write("\n") f2.destroy() f1=tkinter.Frame(bg="#CCCCFF") f1.pack(fill="both",expand="true") l1=tkinter.Label(f1,text="Congratulations! 
Your Entry Is Made.",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true") b1=tkinter.Button(f1,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25) b1.pack(side="right") a=e.get() a=a.upper() f1.destroy() f=open(a,"w") f2=tkinter.Frame(bg="#CCCCFF") f2.pack(fill="both",expand="true") l1=tkinter.Label(f2,text="Make An Entry:\n\nExample: Item1 Rs amount\nItem2 Rs amount",bg="#FFFFFF",fg="#680000",relief="groove",anchor="n",pady=50,font=25) l1.pack(fill="x",expand="true") TextArea = tkinter.Text(f2) ScrollBar = tkinter.Scrollbar(f2,bg="white") ScrollBar.config(command=TextArea.yview) TextArea.config(yscrollcommand=ScrollBar.set) ScrollBar.pack(side="right", fill="y") TextArea.pack(fill="x") b1=tkinter.Button(f2,text="Submit",bg="#FFFFFF",fg="blue",relief="groove",command=sub1) b1.pack() frame.destroy() f1=tkinter.Frame(bg="#CCCCFF") f1.pack(fill="both",expand="true") l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true",anchor="n") l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50) l1.place(relx=0.35,rely=0.5) e=tkinter.Entry(f1) e.place(relx=0.52,rely=0.5) b1=tkinter.Button(f1,text="Make Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_create,height=2,width=10) b1.place(relx=0.45,rely=0.56) #Defining the get function: def get(): def sub_get(): def sub2(): f2.destroy() a=e.get() a=a.upper() f1.destroy() f=open(a,'r') f2=tkinter.Frame() f2.pack(fill="both",expand="true") l1=tkinter.Label(f2,text="Your Entries For The Date "+a+" Are: ",bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50) l1.pack(fill="x",expand="true") TextArea = tkinter.Text(f2) ScrollBar = tkinter.Scrollbar(f2) ScrollBar.config(command=TextArea.yview) ScrollBar.pack(side="right", fill="y") TextArea.insert('insert',f.read()) TextArea.config(yscrollcommand=ScrollBar.set,state="disabled") TextArea.pack(fill="both",expand="true") b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25) b1.pack(side="right") frame.destroy() f1=tkinter.Frame(bg="#CCCCFF") f1.pack(fill="both",expand="true") l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true",anchor="n") l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50) l1.place(relx=0.35,rely=0.5) e=tkinter.Entry(f1) e.place(relx=0.52,rely=0.5) b1=tkinter.Button(f1,text="Get Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_get,height=2,width=10) b1.place(relx=0.45,rely=0.56) #Defining the get entry by month function: def getm(): def sub_getm(): def sub2(): f2.destroy() a=e.get() a=a.upper() f1.destroy() f2=tkinter.Frame() f2.pack(fill="both",expand="true") l1=tkinter.Label(f2,text="Your Entries For The Month Are: ",bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50) l1.pack(fill="x",expand="true") TextArea = tkinter.Text(f2) ScrollBar = tkinter.Scrollbar(f2) ScrollBar.config(command=TextArea.yview) ScrollBar.pack(side="right", fill="y") for i in range(1,32): try: f=open(str(i)+' '+a,'r') except IOError: continue TextArea.insert('insert',"\n"+str(i)+' '+a+": \n\n"+f.read()) TextArea.config(yscrollcommand=ScrollBar.set,state="disabled") TextArea.pack(fill="both",expand="true") 
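# The month view above assembles file names by hand: entry sheets live in
# the working directory as "<day> <MONTH YEAR>" (uppercased), and days 1..31
# are probed one by one, skipping IOError for missing days. The same
# discovery could be done with glob; a sketch, where month_sheets is a
# hypothetical helper and not part of the original app:
import glob

def month_sheets(month_year):
    # e.g. month_sheets("JULY 2016") -> ["1 JULY 2016", "14 JULY 2016", ...]
    names = glob.glob("[0-9]* " + month_year.upper())
    # sort numerically by the leading day number
    return sorted(names, key=lambda n: int(n.split(" ", 1)[0]))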
        b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
        b1.pack(side="right")
    frame.destroy()
    f1=tkinter.Frame(bg="#CCCCFF")
    f1.pack(fill="both",expand="true")
    l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
    l1.pack(fill="x",expand="true",anchor="n")
    l1=tkinter.Label(f1,text="Enter Month And Year:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
    l1.place(relx=0.3,rely=0.5)
    e=tkinter.Entry(f1)
    e.place(relx=0.52,rely=0.5)
    b1=tkinter.Button(f1,text="Get Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_getm,height=2,width=10)
    b1.place(relx=0.47,rely=0.56)

#Get Total Expense By Date:
def Sum():
    def sub_Sum():
        def sub2():
            f2.destroy()
        t=e.get()
        t=t.upper()
        try:
            f=open(t,'r')
            a=f.readlines()
            c=0
            for i in range(0,len(a)):
                b=''
                # scan each line from the right, collecting the digits of
                # the trailing amount; stop at the first letter (the "Rs"
                # marker) rather than at the digit '9', which previously
                # truncated any amount containing a 9
                for j in range(len(a[i])-1,0,-1):
                    if(a[i][j]!=' ' and a[i][j]>='0' and a[i][j]<='9'):
                        b=b+a[i][j]
                    if(a[i][j].isalpha()):
                        break
                if(b!=''):
                    c=c+int(b[::-1])
            f1.destroy()
            f2=tkinter.Frame()
            f2.pack(fill="both",expand="true")
            l1=tkinter.Label(f2,text="Total Expenses On "+t+" Are: \nRs "+str(c),bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50,font=50)
            l1.pack(fill="both",expand="true")
        except IOError:
            # no entry sheet exists for this date; build the result frame
            # before using it (the original referenced f2 and c here
            # before either was assigned)
            f1.destroy()
            f2=tkinter.Frame()
            f2.pack(fill="both",expand="true")
            l1=tkinter.Label(f2,text="No Such Entry Is Made For "+t,bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50,font=50)
            l1.pack(fill="both",expand="true")
        b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
        b1.pack(side="right")
    frame.destroy()
    f1=tkinter.Frame(bg="#CCCCFF")
    f1.pack(fill="both",expand="true")
    l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
    l1.pack(fill="x",expand="true",anchor="n")
    l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
    l1.place(relx=0.35,rely=0.5)
    e=tkinter.Entry(f1)
    e.place(relx=0.52,rely=0.5)
    b1=tkinter.Button(f1,text="Get Expenses",bg="#9999FF",fg="#660099",relief="groove",command=sub_Sum,height=2,width=10)
    b1.place(relx=0.47,rely=0.56)

#Defining Get total expense in a month:
def Summ():
    def sub_Summ():
        def sub2():
            f2.destroy()
        a=e.get()
        a=a.upper()
        c=0
        for i in range(1,32):
            try:
                f=open(str(i)+' '+a,'r')
                t=f.readlines()
                # use k for the line index so the day counter i is not
                # shadowed, and stop the digit scan at the first letter,
                # as in Sum() above (see the regex-based sketch after this
                # function for a sturdier parser)
                for k in range(0,len(t)):
                    b=''
                    for j in range(len(t[k])-1,0,-1):
                        if(t[k][j]!=' ' and t[k][j]>='0' and t[k][j]<='9'):
                            b=b+t[k][j]
                        if(t[k][j].isalpha()):
                            break
                    if(b!=''):
                        c=c+int(b[::-1])
            except IOError:
                continue
        f1.destroy()
        f2=tkinter.Frame()
        f2.pack(fill="both",expand="true")
        l1=tkinter.Label(f2,text="Total Expenses In "+a+" Are: \nRs "+str(c),bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50,font=50)
        l1.pack(fill="both",expand="true")
        b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
        b1.pack(side="right")
    frame.destroy()
    f1=tkinter.Frame(bg="#CCCCFF")
    f1.pack(fill="both",expand="true")
    l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
    l1.pack(fill="x",expand="true",anchor="n")
    l1=tkinter.Label(f1,text="Enter Month And Year:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
    l1.place(relx=0.3,rely=0.5)
    e=tkinter.Entry(f1)
    e.place(relx=0.52,rely=0.5)
    b1=tkinter.Button(f1,text="Get Expenses",bg="#9999FF",fg="#660099",relief="groove",command=sub_Summ,height=2,width=10)
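# Sum() and Summ() above recover each amount by scanning characters from the
# right, which miscounts when an item name itself contains digits. A sturdier
# alternative pulls the trailing integer off every "Item Rs amount" line with
# a regular expression. A sketch only: total_for_file is a hypothetical
# helper, not part of the original app.
import re

_AMOUNT = re.compile(r"(\d+)\s*$")

def total_for_file(path):
    """Sum the trailing amount on each line of one entry sheet.

    Returns None if no sheet exists for that date.
    """
    total = 0
    try:
        with open(path) as fh:
            for line in fh:
                m = _AMOUNT.search(line.rstrip())
                if m:
                    total += int(m.group(1))
    except IOError:
        return None
    return total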
b1.place(relx=0.47,rely=0.56) # Defining Add to entry: def add(): def sub_add(): def sub1(): def sub2(): f1.destroy() b=TextArea.get("1.0",'end') if(b!='0'): f.write(b) f.write("\n") f2.destroy() f1=tkinter.Frame(bg="#CCCCFF") f1.pack(fill="both",expand="true") l1=tkinter.Label(f1,text="Congratulations! Your Entry Is Made.",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true") b1=tkinter.Button(f1,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25) b1.pack(side="right") a=e.get() a=a.upper() f1.destroy() f=open(a,"a") f2=tkinter.Frame(bg="#CCCCFF") f2.pack(fill="both",expand="true") l1=tkinter.Label(f2,text="Make An Entry:\n\nExample: Item1 Rs amount\nItem2 Rs amount",bg="#FFFFFF",fg="#680000",relief="groove",anchor="n",pady=50,font=25) l1.pack(fill="x",expand="true") TextArea = tkinter.Text(f2) ScrollBar = tkinter.Scrollbar(f2,bg="white") ScrollBar.config(command=TextArea.yview) TextArea.config(yscrollcommand=ScrollBar.set) ScrollBar.pack(side="right", fill="y") TextArea.pack(fill="x") b1=tkinter.Button(f2,text="Submit",bg="#FFFFFF",fg="blue",relief="groove",command=sub1) b1.pack() frame.destroy() f1=tkinter.Frame(bg="#CCCCFF") f1.pack(fill="both",expand="true") l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true",anchor="n") l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50) l1.place(relx=0.35,rely=0.5) e=tkinter.Entry(f1) e.place(relx=0.52,rely=0.5) b1=tkinter.Button(f1,text="Make Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_add,height=2,width=10) b1.place(relx=0.45,rely=0.56) # Defining Delete Existing Entry: def delete(): import os delete=lambda x:os.remove(x) def sub_delete(): def sub2(): f2.destroy() a=e.get() a=a.upper() delete(a) f1.destroy() f2=tkinter.Frame(bg="#CCCCFF") f2.pack(fill="both",expand="true") l1=tkinter.Label(f2,text="Congratulations! 
Your Entry Is Deleted.",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true") b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25) b1.pack(side="right") frame.destroy() f1=tkinter.Frame(bg="#CCCCFF") f1.pack(fill="both",expand="true") l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true",anchor="n") l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50) l1.place(relx=0.35,rely=0.5) e=tkinter.Entry(f1) e.place(relx=0.52,rely=0.5) b1=tkinter.Button(f1,text="Delete",bg="#9999FF",fg="#660099",relief="groove",command=sub_delete,height=2,width=10) b1.place(relx=0.47,rely=0.56) import tkinter root=tkinter.Tk() root.geometry('1250x700') def start(): global frame frame=tkinter.Frame(bg="#CCCCFF") frame.pack(fill="both",expand="true",) l1=tkinter.Label(frame,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true",anchor="n") l1=tkinter.Label(frame,text="Press To Create A new Entry",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16,padx=16) l1.place(relx=0.2,rely=0.2,anchor="n") b1=tkinter.Button(frame,text="Create Entry Sheet",bg="#9999FF",fg="#660099",relief="solid",command=create,font=11,pady=16,cursor="dot") b1.place(relx=0.2,rely=0.3,anchor="n") l1=tkinter.Label(frame,text="Press To Get Entry By Date",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16,padx=16) l1.place(relx=0.4,rely=0.2,anchor="n") b1=tkinter.Button(frame,text="Get Entry Sheet",bg="#9999FF",fg="#660099",relief="solid",command=get,font=11,pady=16,cursor="dot") b1.place(relx=0.4,rely=0.3,anchor="n") l1=tkinter.Label(frame,text="Press To Get Entry By Month",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16,padx=7) l1.place(relx=0.6,rely=0.2,anchor="n") b1=tkinter.Button(frame,text="Get Monthly Entry Sheet",bg="#9999FF",fg="#660099",relief="solid",command=getm,font=11,pady=16,cursor="dot") b1.place(relx=0.6,rely=0.3,anchor="n") l1=tkinter.Label(frame,text="Press To Get Total Expense On Date",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16) l1.place(relx=0.8,rely=0.2,anchor="n") b1=tkinter.Button(frame,text="Get Total Expense On Date",bg="#9999FF",fg="#660099",relief="solid",command=Sum,font=11,pady=16,cursor="dot") b1.place(relx=0.8,rely=0.3,anchor="n") l1=tkinter.Label(frame,text="Press To Get Total Expense In A Month",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16) l1.place(relx=0.3,rely=0.5,anchor="n") b1=tkinter.Button(frame,text="Get Total Expense",bg="#9999FF",fg="#660099",relief="solid",command=Summ,font=11,pady=16,cursor="dot") b1.place(relx=0.3,rely=0.6,anchor="n") l1=tkinter.Label(frame,text="Press To Add To Existing Entry",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16) l1.place(relx=0.51,rely=0.5,anchor="n") b1=tkinter.Button(frame,text="Add To Entry",bg="#9999FF",fg="#660099",relief="solid",command=add,font=11,pady=16,cursor="dot") b1.place(relx=0.51,rely=0.6,anchor="n") l1=tkinter.Label(frame,text="Press To Delete Existing Entry",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16) l1.place(relx=0.7,rely=0.5,anchor="n") b1=tkinter.Button(frame,text="Delete Entry",bg="#9999FF",fg="#660099",relief="solid",command=delete,font=11,pady=16,cursor="dot") b1.place(relx=0.7,rely=0.6,anchor="n") l1=tkinter.Label(frame,text="\u00a9"+" copyright 
2016\t\t\t\t\t\t\t\t\t\t\t\t"+"Developed By: Akshit Grover",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16) l1.pack(fill="x",expand="true",anchor="s") root.mainloop() start() #!/usr/bin/env python # This file is part of tcollector. # Copyright (C) 2013 The tcollector Authors. # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. This program is distributed in the hope that it # will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty # of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser # General Public License for more details. You should have received a copy # of the GNU Lesser General Public License along with this program. If not, # see . """ Collector for PostgreSQL. Please, set login/password at etc/postgresql.conf . Collector uses socket file for DB connection so set 'unix_socket_directory' at postgresql.conf . """ import sys import os import time import socket import errno try: import psycopg2 except ImportError: psycopg2 = None # handled in main() COLLECTION_INTERVAL = 15 # seconds CONNECT_TIMEOUT = 2 # seconds from collectors.lib import utils from collectors.etc import postgresqlconf # Directories under which to search socket files SEARCH_DIRS = frozenset([ "/var/run/postgresql", # Debian default "/var/pgsql_socket", # MacOS default "/usr/local/var/postgres", # custom compilation "/tmp", # custom compilation ]) def find_sockdir(): """Returns a path to PostgreSQL socket file to monitor.""" for dir in SEARCH_DIRS: for dirpath, dirnames, dirfiles in os.walk(dir, followlinks=True): for name in dirfiles: # ensure selection of PostgreSQL socket only if (utils.is_sockfile(os.path.join(dirpath, name)) and "PGSQL" in name): return(dirpath) def postgres_connect(sockdir): """Connects to the PostgreSQL server using the specified socket file.""" user, password = postgresqlconf.get_user_password() try: return psycopg2.connect("host='%s' user='%s' password='%s' " "connect_timeout='%s' dbname=postgres" % (sockdir, user, password, CONNECT_TIMEOUT)) except (EnvironmentError, EOFError, RuntimeError, socket.error), e: utils.err("Couldn't connect to DB :%s" % (e)) def collect(db): """ Collects and prints stats. 
    Here we collect only general info; for the full list of data available
    for collection see
    http://www.postgresql.org/docs/9.2/static/monitoring-stats.html
    """
    try:
        cursor = db.cursor()
        # general statistics
        cursor.execute("SELECT pg_stat_database.*, pg_database_size"
                       " (pg_database.datname) AS size FROM pg_database JOIN"
                       " pg_stat_database ON pg_database.datname ="
                       " pg_stat_database.datname WHERE pg_stat_database.datname"
                       " NOT IN ('template0', 'template1', 'postgres')")
        ts = time.time()
        stats = cursor.fetchall()

        # datid | datname | numbackends | xact_commit | xact_rollback |
        # blks_read | blks_hit | tup_returned | tup_fetched | tup_inserted |
        # tup_updated | tup_deleted | conflicts | temp_files | temp_bytes |
        # deadlocks | blk_read_time | blk_write_time | stats_reset | size
        result = {}
        for stat in stats:
            database = stat[1]
            result[database] = stat

        for database in result:
            for i in range(2, len(cursor.description)):
                metric = cursor.description[i].name
                value = result[database][i]
                try:
                    # stats_reset is a timestamp, not a numeric metric
                    if metric in ("stats_reset",):
                        continue
                    print("postgresql.%s %i %s database=%s"
                          % (metric, ts, value, database))
                except Exception:
                    utils.err("error: failed to print metric %s" % metric)
                    continue

        # connections
        cursor.execute("SELECT datname, count(datname) FROM pg_stat_activity"
                       " GROUP BY pg_stat_activity.datname")
        ts = time.time()
        connections = cursor.fetchall()
        for database, connection in connections:
            print("postgresql.connections %i %s database=%s"
                  % (ts, connection, database))

    except (EnvironmentError, EOFError, RuntimeError, socket.error) as e:
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
            # exit on a broken pipe. There is no point in continuing
            # because no one will read our stdout anyway.
            return 2
        utils.err("error: failed to collect data: %s" % e)


def main(args):
    """Collects and dumps stats from a PostgreSQL server."""
    if psycopg2 is None:
        utils.err("error: Python module 'psycopg2' is missing")
        return 13  # Ask tcollector to not respawn us

    sockdir = find_sockdir()
    if not sockdir:  # Nothing to monitor
        utils.err("error: Can't find postgresql socket file")
        return 13  # Ask tcollector to not respawn us

    db = postgres_connect(sockdir)
    db.autocommit = True

    while True:
        collect(db)
        sys.stdout.flush()
        time.sleep(COLLECTION_INTERVAL)


if __name__ == "__main__":
    sys.stdin.close()
    sys.exit(main(sys.argv))


# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest_lib.common.utils import data_utils

from neutron.tests.api import base
from neutron.tests.tempest import exceptions
from neutron.tests.tempest import test

AGENT_TYPE = 'L3 agent'
AGENT_MODES = (
    'legacy',
    'dvr_snat'
)


class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
    _agent_mode = 'legacy'

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        List routers that the given L3 agent is hosting.
        List L3 agents hosting the given router.
        Add and Remove Router to L3 agent

    v2.0 of the Neutron API is assumed.

    The l3_agent_scheduler extension is required for these tests.
""" @classmethod def skip_checks(cls): super(L3AgentSchedulerTestJSON, cls).skip_checks() if not test.is_extension_enabled('l3_agent_scheduler', 'network'): msg = "L3 Agent Scheduler Extension not enabled." raise cls.skipException(msg) @classmethod def resource_setup(cls): super(L3AgentSchedulerTestJSON, cls).resource_setup() body = cls.admin_client.list_agents() agents = body['agents'] for agent in agents: # TODO(armax): falling back on default _agent_mode can be # dropped as soon as Icehouse is dropped. agent_mode = ( agent['configurations'].get('agent_mode', cls._agent_mode)) if agent['agent_type'] == AGENT_TYPE and agent_mode in AGENT_MODES: cls.agent = agent break else: msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found" raise exceptions.InvalidConfiguration(msg) cls.router = cls.create_router(data_utils.rand_name('router')) # NOTE(armax): If DVR is an available extension, and the created router # is indeed a distributed one, more resources need to be provisioned # in order to bind the router to the L3 agent. # That said, let's preserve the existing test logic, where the extra # query and setup steps are only required if the extension is available # and only if the router's default type is distributed. if test.is_extension_enabled('dvr', 'network'): is_dvr_router = cls.admin_client.show_router( cls.router['id'])['router'].get('distributed', False) if is_dvr_router: cls.network = cls.create_network() cls.create_subnet(cls.network) cls.port = cls.create_port(cls.network) cls.client.add_router_interface_with_port_id( cls.router['id'], cls.port['id']) @test.attr(type='smoke') @test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a') def test_list_routers_on_l3_agent(self): self.admin_client.list_routers_on_l3_agent(self.agent['id']) @test.attr(type='smoke') @test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66') def test_add_list_remove_router_on_l3_agent(self): l3_agent_ids = list() self.admin_client.add_router_to_l3_agent( self.agent['id'], self.router['id']) body = ( self.admin_client.list_l3_agents_hosting_router(self.router['id'])) for agent in body['agents']: l3_agent_ids.append(agent['id']) self.assertIn('agent_type', agent) self.assertEqual('L3 agent', agent['agent_type']) self.assertIn(self.agent['id'], l3_agent_ids) body = self.admin_client.remove_router_from_l3_agent( self.agent['id'], self.router['id']) # NOTE(afazekas): The deletion not asserted, because neutron # is not forbidden to reschedule the router to the same agent # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2012-today OpenERP SA () # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details # # You should have received a copy of the GNU Affero General Public License # along with this program. 
If not, see # ############################################################################## from openerp.osv import osv, fields from openerp.tools.safe_eval import safe_eval class base_config_settings(osv.TransientModel): _inherit = 'base.config.settings' _columns = { 'auth_signup_reset_password': fields.boolean('Enable password reset from Login page', help="This allows users to trigger a password reset from the Login page."), 'auth_signup_uninvited': fields.boolean('Allow external users to sign up', help="If unchecked, only invited users may sign up."), 'auth_signup_template_user_id': fields.many2one('res.users', string='Template user for new users created through signup'), } def get_default_auth_signup_template_user_id(self, cr, uid, fields, context=None): icp = self.pool.get('ir.config_parameter') # we use safe_eval on the result, since the value of the parameter is a nonempty string return { 'auth_signup_reset_password': safe_eval(icp.get_param(cr, uid, 'auth_signup.reset_password', 'False')), 'auth_signup_uninvited': safe_eval(icp.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')), 'auth_signup_template_user_id': safe_eval(icp.get_param(cr, uid, 'auth_signup.template_user_id', 'False')), } def set_auth_signup_template_user_id(self, cr, uid, ids, context=None): config = self.browse(cr, uid, ids[0], context=context) icp = self.pool.get('ir.config_parameter') # we store the repr of the values, since the value of the parameter is a required string icp.set_param(cr, uid, 'auth_signup.reset_password', repr(config.auth_signup_reset_password)) icp.set_param(cr, uid, 'auth_signup.allow_uninvited', repr(config.auth_signup_uninvited)) icp.set_param(cr, uid, 'auth_signup.template_user_id', repr(config.auth_signup_template_user_id.id)) # coding: utf-8 # DO NOT EDIT # Autogenerated from the notebook contrasts.ipynb. # Edit the notebook and then sync the output with this file. # # flake8: noqa # DO NOT EDIT # # Contrasts Overview import numpy as np import statsmodels.api as sm # This document is based heavily on this excellent resource from UCLA # http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm # A categorical variable of K categories, or levels, usually enters a # regression as a sequence of K-1 dummy variables. This amounts to a linear # hypothesis on the level means. That is, each test statistic for these # variables amounts to testing whether the mean for that level is # statistically significantly different from the mean of the base category. # This dummy coding is called Treatment coding in R parlance, and we will # follow this convention. There are, however, different coding methods that # amount to different sets of linear hypotheses. # # In fact, the dummy coding is not technically a contrast coding. This is # because the dummy variables add to one and are not functionally # independent of the model's intercept. On the other hand, a set of # *contrasts* for a categorical variable with `k` levels is a set of `k-1` # functionally independent linear combinations of the factor level means # that are also independent of the sum of the dummy variables. The dummy # coding is not wrong *per se*. It captures all of the coefficients, but it # complicates matters when the model assumes independence of the # coefficients such as in ANOVA. Linear regression models do not assume # independence of the coefficients and thus dummy coding is often the only # coding that is taught in this context. # # To have a look at the contrast matrices in Patsy, we will use data from # UCLA ATS. 
First let's load the data. # #### Example Data import pandas as pd url = 'https://stats.idre.ucla.edu/stat/data/hsb2.csv' hsb2 = pd.read_table(url, delimiter=",") hsb2.head(10) # It will be instructive to look at the mean of the dependent variable, # write, for each level of race ((1 = Hispanic, 2 = Asian, 3 = African # American and 4 = Caucasian)). hsb2.groupby('race')['write'].mean() # #### Treatment (Dummy) Coding # Dummy coding is likely the most well known coding scheme. It compares # each level of the categorical variable to a base reference level. The base # reference level is the value of the intercept. It is the default contrast # in Patsy for unordered categorical factors. The Treatment contrast matrix # for race would be from patsy.contrasts import Treatment levels = [1, 2, 3, 4] contrast = Treatment(reference=0).code_without_intercept(levels) print(contrast.matrix) # Here we used `reference=0`, which implies that the first level, # Hispanic, is the reference category against which the other level effects # are measured. As mentioned above, the columns do not sum to zero and are # thus not independent of the intercept. To be explicit, let's look at how # this would encode the `race` variable. hsb2.race.head(10) print(contrast.matrix[hsb2.race - 1, :][:20]) sm.categorical(hsb2.race.values) # This is a bit of a trick, as the `race` category conveniently maps to # zero-based indices. If it does not, this conversion happens under the # hood, so this will not work in general but nonetheless is a useful exercise # to fix ideas. The below illustrates the output using the three contrasts # above from statsmodels.formula.api import ols mod = ols("write ~ C(race, Treatment)", data=hsb2) res = mod.fit() print(res.summary()) # We explicitly gave the contrast for race; however, since Treatment is # the default, we could have omitted this. # ### Simple Coding # Like Treatment Coding, Simple Coding compares each level to a fixed # reference level. However, with simple coding, the intercept is the grand # mean of all the levels of the factors. Patsy does not have the Simple # contrast included, but you can easily define your own contrasts. To do so, # write a class that contains a code_with_intercept and a # code_without_intercept method that returns a patsy.contrast.ContrastMatrix # instance from patsy.contrasts import ContrastMatrix def _name_levels(prefix, levels): return ["[%s%s]" % (prefix, level) for level in levels] class Simple(object): def _simple_contrast(self, levels): nlevels = len(levels) contr = -1. / nlevels * np.ones((nlevels, nlevels - 1)) contr[1:][np.diag_indices(nlevels - 1)] = (nlevels - 1.) / nlevels return contr def code_with_intercept(self, levels): contrast = np.column_stack((np.ones(len(levels)), self._simple_contrast(levels))) return ContrastMatrix(contrast, _name_levels("Simp.", levels)) def code_without_intercept(self, levels): contrast = self._simple_contrast(levels) return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1])) hsb2.groupby('race')['write'].mean().mean() contrast = Simple().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Simple)", data=hsb2) res = mod.fit() print(res.summary()) # ### Sum (Deviation) Coding # Sum coding compares the mean of the dependent variable for a given level # to the overall mean of the dependent variable over all the levels. 
That # is, it uses contrasts between each of the first k-1 levels and level k In # this example, level 1 is compared to all the others, level 2 to all the # others, and level 3 to all the others. from patsy.contrasts import Sum contrast = Sum().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Sum)", data=hsb2) res = mod.fit() print(res.summary()) # This corresponds to a parameterization that forces all the coefficients # to sum to zero. Notice that the intercept here is the grand mean where the # grand mean is the mean of means of the dependent variable by each level. hsb2.groupby('race')['write'].mean().mean() # ### Backward Difference Coding # In backward difference coding, the mean of the dependent variable for a # level is compared with the mean of the dependent variable for the prior # level. This type of coding may be useful for a nominal or an ordinal # variable. from patsy.contrasts import Diff contrast = Diff().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Diff)", data=hsb2) res = mod.fit() print(res.summary()) # For example, here the coefficient on level 1 is the mean of `write` at # level 2 compared with the mean at level 1. Ie., res.params["C(race, Diff)[D.1]"] hsb2.groupby('race').mean()["write"][2] - hsb2.groupby( 'race').mean()["write"][1] # ### Helmert Coding # Our version of Helmert coding is sometimes referred to as Reverse # Helmert Coding. The mean of the dependent variable for a level is compared # to the mean of the dependent variable over all previous levels. Hence, the # name 'reverse' being sometimes applied to differentiate from forward # Helmert coding. This comparison does not make much sense for a nominal # variable such as race, but we would use the Helmert contrast like so: from patsy.contrasts import Helmert contrast = Helmert().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Helmert)", data=hsb2) res = mod.fit() print(res.summary()) # To illustrate, the comparison on level 4 is the mean of the dependent # variable at the previous three levels taken from the mean at level 4 grouped = hsb2.groupby('race') grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean() # As you can see, these are only equal up to a constant. Other versions of # the Helmert contrast give the actual difference in means. Regardless, the # hypothesis tests are the same. k = 4 1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean()) k = 3 1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean()) # ### Orthogonal Polynomial Coding # The coefficients taken on by polynomial coding for `k=4` levels are the # linear, quadratic, and cubic trends in the categorical variable. The # categorical variable here is assumed to be represented by an underlying, # equally spaced numeric variable. Therefore, this type of encoding is used # only for ordered categorical variables with equal spacing. In general, the # polynomial contrast produces polynomials of order `k-1`. Since `race` is # not an ordered factor variable let's use `read` as an example. First we # need to create an ordered categorical from `read`. 
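# As a quick illustration of the call used in the next cell: `pd.cut` slices
# a numeric column into equal-width bins and returns an ordered categorical,
# which is the "ordered, equally spaced" structure the polynomial contrast
# assumes. The toy series here is illustrative only.

print(pd.cut(pd.Series([28, 39, 46, 52, 57, 63]), bins=3))
# three Interval-valued categories, ordered low < middle < high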
hsb2['readcat'] = np.asarray(pd.cut(hsb2.read, bins=3)) hsb2.groupby('readcat').mean()['write'] from patsy.contrasts import Poly levels = hsb2.readcat.unique().tolist() contrast = Poly().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(readcat, Poly)", data=hsb2) res = mod.fit() print(res.summary()) # As you can see, readcat has a significant linear effect on the dependent # variable `write` but not a significant quadratic or cubic effect. from __future__ import absolute_import, division, print_function import pytest import sys from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup from _pytest.runner import runtestprotocol class TestEvaluator(object): def test_no_marker(self, testdir): item = testdir.getitem("def test_func(): pass") evalskipif = MarkEvaluator(item, "skipif") assert not evalskipif assert not evalskipif.istrue() def test_marked_no_args(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xyz def test_func(): pass """ ) ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "" assert not ev.get("run", False) def test_marked_one_arg(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xyz("hasattr(os, 'sep')") def test_func(): pass """ ) ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: hasattr(os, 'sep')" @pytest.mark.skipif("sys.version_info[0] >= 3") def test_marked_one_arg_unicode(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xyz(u"hasattr(os, 'sep')") def test_func(): pass """ ) ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: hasattr(os, 'sep')" def test_marked_one_arg_with_reason(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world") def test_func(): pass """ ) ev = MarkEvaluator(item, "xyz") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "hello world" assert ev.get("attr") == 2 def test_marked_one_arg_twice(self, testdir): lines = [ """@pytest.mark.skipif("not hasattr(os, 'murks')")""", """@pytest.mark.skipif("hasattr(os, 'murks')")""", ] for i in range(0, 2): item = testdir.getitem( """ import pytest %s %s def test_func(): pass """ % (lines[i], lines[(i + 1) % 2]) ) ev = MarkEvaluator(item, "skipif") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: not hasattr(os, 'murks')" def test_marked_one_arg_twice2(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.skipif("hasattr(os, 'murks')") @pytest.mark.skipif("not hasattr(os, 'murks')") def test_func(): pass """ ) ev = MarkEvaluator(item, "skipif") assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: not hasattr(os, 'murks')" def test_marked_skip_with_not_string(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.skipif(False) def test_func(): pass """ ) ev = MarkEvaluator(item, "skipif") exc = pytest.raises(pytest.fail.Exception, ev.istrue) assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg def test_skipif_class(self, testdir): item, = testdir.getitems( """ import pytest class TestClass(object): pytestmark = pytest.mark.skipif("config._hackxyz") def test_func(self): pass """ ) item.config._hackxyz = 3 ev = MarkEvaluator(item, "skipif") assert ev.istrue() expl = ev.getexplanation() 
assert expl == "condition: config._hackxyz" class TestXFail(object): @pytest.mark.parametrize("strict", [True, False]) def test_xfail_simple(self, testdir, strict): item = testdir.getitem( """ import pytest @pytest.mark.xfail(strict=%s) def test_func(): assert 0 """ % strict ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.skipped assert callreport.wasxfail == "" def test_xfail_xpassed(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xfail(reason="this is an xfail") def test_func(): assert 1 """ ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.passed assert callreport.wasxfail == "this is an xfail" def test_xfail_using_platform(self, testdir): """ Verify that platform can be used with xfail statements. """ item = testdir.getitem( """ import pytest @pytest.mark.xfail("platform.platform() == platform.platform()") def test_func(): assert 0 """ ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.wasxfail def test_xfail_xpassed_strict(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xfail(strict=True, reason="nope") def test_func(): assert 1 """ ) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.failed assert callreport.longrepr == "[XPASS(strict)] nope" assert not hasattr(callreport, "wasxfail") def test_xfail_run_anyway(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.xfail def test_func(): assert 0 def test_func2(): pytest.xfail("hello") """ ) result = testdir.runpytest("--runxfail") result.stdout.fnmatch_lines( ["*def test_func():*", "*assert 0*", "*1 failed*1 pass*"] ) def test_xfail_evalfalse_but_fails(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.xfail('False') def test_func(): assert 0 """ ) reports = runtestprotocol(item, log=False) callreport = reports[1] assert callreport.failed assert not hasattr(callreport, "wasxfail") assert "xfail" in callreport.keywords def test_xfail_not_report_default(self, testdir): p = testdir.makepyfile( test_one=""" import pytest @pytest.mark.xfail def test_this(): assert 0 """ ) testdir.runpytest(p, "-v") # result.stdout.fnmatch_lines([ # "*HINT*use*-r*" # ]) def test_xfail_not_run_xfail_reporting(self, testdir): p = testdir.makepyfile( test_one=""" import pytest @pytest.mark.xfail(run=False, reason="noway") def test_this(): assert 0 @pytest.mark.xfail("True", run=False) def test_this_true(): assert 0 @pytest.mark.xfail("False", run=False, reason="huh") def test_this_false(): assert 1 """ ) result = testdir.runpytest(p, "-rx") result.stdout.fnmatch_lines( [ "*test_one*test_this*", "*NOTRUN*noway", "*test_one*test_this_true*", "*NOTRUN*condition:*True*", "*1 passed*", ] ) def test_xfail_not_run_no_setup_run(self, testdir): p = testdir.makepyfile( test_one=""" import pytest @pytest.mark.xfail(run=False, reason="hello") def test_this(): assert 0 def setup_module(mod): raise ValueError(42) """ ) result = testdir.runpytest(p, "-rx") result.stdout.fnmatch_lines( ["*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*"] ) def test_xfail_xpass(self, testdir): p = testdir.makepyfile( test_one=""" import pytest @pytest.mark.xfail def test_that(): assert 1 """ ) result = testdir.runpytest(p, "-rX") result.stdout.fnmatch_lines(["*XPASS*test_that*", "*1 xpassed*"]) assert result.ret == 0 def test_xfail_imperative(self, testdir): p = 
testdir.makepyfile( """ import pytest def test_this(): pytest.xfail("hello") """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*1 xfailed*"]) result = testdir.runpytest(p, "-rx") result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"]) result = testdir.runpytest(p, "--runxfail") result.stdout.fnmatch_lines("*1 pass*") def test_xfail_imperative_in_setup_function(self, testdir): p = testdir.makepyfile( """ import pytest def setup_function(function): pytest.xfail("hello") def test_this(): assert 0 """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*1 xfailed*"]) result = testdir.runpytest(p, "-rx") result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"]) result = testdir.runpytest(p, "--runxfail") result.stdout.fnmatch_lines( """ *def test_this* *1 fail* """ ) def xtest_dynamic_xfail_set_during_setup(self, testdir): p = testdir.makepyfile( """ import pytest def setup_function(function): pytest.mark.xfail(function) def test_this(): assert 0 def test_that(): assert 1 """ ) result = testdir.runpytest(p, "-rxX") result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*XPASS*test_that*"]) def test_dynamic_xfail_no_run(self, testdir): p = testdir.makepyfile( """ import pytest @pytest.fixture def arg(request): request.applymarker(pytest.mark.xfail(run=False)) def test_this(arg): assert 0 """ ) result = testdir.runpytest(p, "-rxX") result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*NOTRUN*"]) def test_dynamic_xfail_set_during_funcarg_setup(self, testdir): p = testdir.makepyfile( """ import pytest @pytest.fixture def arg(request): request.applymarker(pytest.mark.xfail) def test_this2(arg): assert 0 """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*1 xfailed*"]) @pytest.mark.parametrize( "expected, actual, matchline", [ ("TypeError", "TypeError", "*1 xfailed*"), ("(AttributeError, TypeError)", "TypeError", "*1 xfailed*"), ("TypeError", "IndexError", "*1 failed*"), ("(AttributeError, TypeError)", "IndexError", "*1 failed*"), ], ) def test_xfail_raises(self, expected, actual, matchline, testdir): p = testdir.makepyfile( """ import pytest @pytest.mark.xfail(raises=%s) def test_raises(): raise %s() """ % (expected, actual) ) result = testdir.runpytest(p) result.stdout.fnmatch_lines([matchline]) def test_strict_sanity(self, testdir): """sanity check for xfail(strict=True): a failing test should behave exactly like a normal xfail. 
""" p = testdir.makepyfile( """ import pytest @pytest.mark.xfail(reason='unsupported feature', strict=True) def test_foo(): assert 0 """ ) result = testdir.runpytest(p, "-rxX") result.stdout.fnmatch_lines(["*XFAIL*", "*unsupported feature*"]) assert result.ret == 0 @pytest.mark.parametrize("strict", [True, False]) def test_strict_xfail(self, testdir, strict): p = testdir.makepyfile( """ import pytest @pytest.mark.xfail(reason='unsupported feature', strict=%s) def test_foo(): with open('foo_executed', 'w'): pass # make sure test executes """ % strict ) result = testdir.runpytest(p, "-rxX") if strict: result.stdout.fnmatch_lines( ["*test_foo*", "*XPASS(strict)*unsupported feature*"] ) else: result.stdout.fnmatch_lines( [ "*test_strict_xfail*", "XPASS test_strict_xfail.py::test_foo unsupported feature", ] ) assert result.ret == (1 if strict else 0) assert testdir.tmpdir.join("foo_executed").isfile() @pytest.mark.parametrize("strict", [True, False]) def test_strict_xfail_condition(self, testdir, strict): p = testdir.makepyfile( """ import pytest @pytest.mark.xfail(False, reason='unsupported feature', strict=%s) def test_foo(): pass """ % strict ) result = testdir.runpytest(p, "-rxX") result.stdout.fnmatch_lines("*1 passed*") assert result.ret == 0 @pytest.mark.parametrize("strict", [True, False]) def test_xfail_condition_keyword(self, testdir, strict): p = testdir.makepyfile( """ import pytest @pytest.mark.xfail(condition=False, reason='unsupported feature', strict=%s) def test_foo(): pass """ % strict ) result = testdir.runpytest(p, "-rxX") result.stdout.fnmatch_lines("*1 passed*") assert result.ret == 0 @pytest.mark.parametrize("strict_val", ["true", "false"]) def test_strict_xfail_default_from_file(self, testdir, strict_val): testdir.makeini( """ [pytest] xfail_strict = %s """ % strict_val ) p = testdir.makepyfile( """ import pytest @pytest.mark.xfail(reason='unsupported feature') def test_foo(): pass """ ) result = testdir.runpytest(p, "-rxX") strict = strict_val == "true" result.stdout.fnmatch_lines("*1 failed*" if strict else "*1 xpassed*") assert result.ret == (1 if strict else 0) class TestXFailwithSetupTeardown(object): def test_failing_setup_issue9(self, testdir): testdir.makepyfile( """ import pytest def setup_function(func): assert 0 @pytest.mark.xfail def test_func(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 xfail*"]) def test_failing_teardown_issue9(self, testdir): testdir.makepyfile( """ import pytest def teardown_function(func): assert 0 @pytest.mark.xfail def test_func(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 xfail*"]) class TestSkip(object): def test_skip_class(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip class TestSomething(object): def test_foo(self): pass def test_bar(self): pass def test_baz(): pass """ ) rec = testdir.inline_run() rec.assertoutcome(skipped=2, passed=1) def test_skips_on_false_string(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip('False') def test_foo(): pass """ ) rec = testdir.inline_run() rec.assertoutcome(skipped=1) def test_arg_as_reason(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip('testing stuff') def test_bar(): pass """ ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(["*testing stuff*", "*1 skipped*"]) def test_skip_no_reason(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip def test_foo(): pass """ ) result = testdir.runpytest("-rs") 
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"]) def test_skip_with_reason(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip(reason="for lolz") def test_bar(): pass """ ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(["*for lolz*", "*1 skipped*"]) def test_only_skips_marked_test(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip def test_foo(): pass @pytest.mark.skip(reason="nothing in particular") def test_bar(): pass def test_baz(): assert True """ ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(["*nothing in particular*", "*1 passed*2 skipped*"]) def test_strict_and_skip(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skip def test_hello(): pass """ ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"]) class TestSkipif(object): def test_skipif_conditional(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.skipif("hasattr(os, 'sep')") def test_func(): pass """ ) x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) assert x.value.msg == "condition: hasattr(os, 'sep')" @pytest.mark.parametrize( "params", ["\"hasattr(sys, 'platform')\"", 'True, reason="invalid platform"'] ) def test_skipif_reporting(self, testdir, params): p = testdir.makepyfile( test_foo=""" import pytest @pytest.mark.skipif(%(params)s) def test_that(): assert 0 """ % dict(params=params) ) result = testdir.runpytest(p, "-s", "-rs") result.stdout.fnmatch_lines(["*SKIP*1*test_foo.py*platform*", "*1 skipped*"]) assert result.ret == 0 def test_skipif_using_platform(self, testdir): item = testdir.getitem( """ import pytest @pytest.mark.skipif("platform.platform() == platform.platform()") def test_func(): pass """ ) pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) @pytest.mark.parametrize( "marker, msg1, msg2", [("skipif", "SKIP", "skipped"), ("xfail", "XPASS", "xpassed")], ) def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2): testdir.makepyfile( test_foo=""" import pytest @pytest.mark.{marker}(False, reason='first_condition') @pytest.mark.{marker}(True, reason='second_condition') def test_foobar(): assert 1 """.format( marker=marker ) ) result = testdir.runpytest("-s", "-rsxX") result.stdout.fnmatch_lines( [ "*{msg1}*test_foo.py*second_condition*".format(msg1=msg1), "*1 {msg2}*".format(msg2=msg2), ] ) assert result.ret == 0 def test_skip_not_report_default(testdir): p = testdir.makepyfile( test_one=""" import pytest def test_this(): pytest.skip("hello") """ ) result = testdir.runpytest(p, "-v") result.stdout.fnmatch_lines( [ # "*HINT*use*-r*", "*1 skipped*" ] ) def test_skipif_class(testdir): p = testdir.makepyfile( """ import pytest class TestClass(object): pytestmark = pytest.mark.skipif("True") def test_that(self): assert 0 def test_though(self): assert 0 """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*2 skipped*"]) def test_skip_reasons_folding(): path = "xyz" lineno = 3 message = "justso" longrepr = (path, lineno, message) class X(object): pass ev1 = X() ev1.when = "execute" ev1.skipped = True ev1.longrepr = longrepr ev2 = X() ev2.when = "execute" ev2.longrepr = longrepr ev2.skipped = True # ev3 might be a collection report ev3 = X() ev3.longrepr = longrepr ev3.skipped = True values = folded_skips([ev1, ev2, ev3]) assert len(values) == 1 num, fspath, lineno, reason = values[0] assert num == 3 assert fspath == path assert lineno == lineno assert reason == 
message def test_skipped_reasons_functional(testdir): testdir.makepyfile( test_one=""" from conftest import doskip def setup_function(func): doskip() def test_func(): pass class TestClass(object): def test_method(self): doskip() """, conftest=""" import pytest def doskip(): pytest.skip('test') """, ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(["*SKIP*2*conftest.py:4: test"]) assert result.ret == 0 def test_skipped_folding(testdir): testdir.makepyfile( test_one=""" import pytest pytestmark = pytest.mark.skip("Folding") def setup_function(func): pass def test_func(): pass class TestClass(object): def test_method(self): pass """ ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(["*SKIP*2*test_one.py: Folding"]) assert result.ret == 0 def test_reportchars(testdir): testdir.makepyfile( """ import pytest def test_1(): assert 0 @pytest.mark.xfail def test_2(): assert 0 @pytest.mark.xfail def test_3(): pass def test_4(): pytest.skip("four") """ ) result = testdir.runpytest("-rfxXs") result.stdout.fnmatch_lines( ["FAIL*test_1*", "XFAIL*test_2*", "XPASS*test_3*", "SKIP*four*"] ) def test_reportchars_error(testdir): testdir.makepyfile( conftest=""" def pytest_runtest_teardown(): assert 0 """, test_simple=""" def test_foo(): pass """, ) result = testdir.runpytest("-rE") result.stdout.fnmatch_lines(["ERROR*test_foo*"]) def test_reportchars_all(testdir): testdir.makepyfile( """ import pytest def test_1(): assert 0 @pytest.mark.xfail def test_2(): assert 0 @pytest.mark.xfail def test_3(): pass def test_4(): pytest.skip("four") """ ) result = testdir.runpytest("-ra") result.stdout.fnmatch_lines( ["FAIL*test_1*", "SKIP*four*", "XFAIL*test_2*", "XPASS*test_3*"] ) def test_reportchars_all_error(testdir): testdir.makepyfile( conftest=""" def pytest_runtest_teardown(): assert 0 """, test_simple=""" def test_foo(): pass """, ) result = testdir.runpytest("-ra") result.stdout.fnmatch_lines(["ERROR*test_foo*"]) @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") def test_errors_in_xfail_skip_expressions(testdir): testdir.makepyfile( """ import pytest @pytest.mark.skipif("asd") def test_nameerror(): pass @pytest.mark.xfail("syntax error") def test_syntax(): pass def test_func(): pass """ ) result = testdir.runpytest() markline = " ^" if sys.platform.startswith("java"): # XXX report this to java markline = "*" + markline[8:] result.stdout.fnmatch_lines( [ "*ERROR*test_nameerror*", "*evaluating*skipif*expression*", "*asd*", "*ERROR*test_syntax*", "*evaluating*xfail*expression*", " syntax error", markline, "SyntaxError: invalid syntax", "*1 pass*2 error*", ] ) def test_xfail_skipif_with_globals(testdir): testdir.makepyfile( """ import pytest x = 3 @pytest.mark.skipif("x == 3") def test_skip1(): pass @pytest.mark.xfail("x == 3") def test_boolean(): assert 0 """ ) result = testdir.runpytest("-rsx") result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"]) def test_direct_gives_error(testdir): testdir.makepyfile( """ import pytest @pytest.mark.skipif(True) def test_skip1(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 error*"]) def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines( [ "*skipif(*condition)*skip*", "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*", ] ) def test_xfail_test_setup_exception(testdir): testdir.makeconftest( """ def pytest_runtest_setup(): 0 / 0 """ ) p = testdir.makepyfile( """ import pytest @pytest.mark.xfail def 
test_func(): assert 0 """ ) result = testdir.runpytest(p) assert result.ret == 0 assert "xfailed" in result.stdout.str() assert "xpassed" not in result.stdout.str() def test_imperativeskip_on_xfail_test(testdir): testdir.makepyfile( """ import pytest @pytest.mark.xfail def test_that_fails(): assert 0 @pytest.mark.skipif("True") def test_hello(): pass """ ) testdir.makeconftest( """ import pytest def pytest_runtest_setup(item): pytest.skip("abc") """ ) result = testdir.runpytest("-rsxX") result.stdout.fnmatch_lines_random( """ *SKIP*abc* *SKIP*condition: True* *2 skipped* """ ) class TestBooleanCondition(object): def test_skipif(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skipif(True, reason="True123") def test_func1(): pass @pytest.mark.skipif(False, reason="True123") def test_func2(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( """ *1 passed*1 skipped* """ ) def test_skipif_noreason(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.skipif(True) def test_func(): pass """ ) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines( """ *1 error* """ ) def test_xfail(self, testdir): testdir.makepyfile( """ import pytest @pytest.mark.xfail(True, reason="True123") def test_func(): assert 0 """ ) result = testdir.runpytest("-rxs") result.stdout.fnmatch_lines( """ *XFAIL* *True123* *1 xfail* """ ) def test_xfail_item(testdir): # Ensure pytest.xfail works with non-Python Item testdir.makeconftest( """ import pytest class MyItem(pytest.Item): nodeid = 'foo' def runtest(self): pytest.xfail("Expected Failure") def pytest_collect_file(path, parent): return MyItem("foo", parent) """ ) result = testdir.inline_run() passed, skipped, failed = result.listoutcomes() assert not failed xfailed = [r for r in skipped if hasattr(r, "wasxfail")] assert xfailed def test_module_level_skip_error(testdir): """ Verify that using pytest.skip at module level causes a collection error """ testdir.makepyfile( """ import pytest @pytest.skip def test_func(): assert True """ ) result = testdir.runpytest() result.stdout.fnmatch_lines("*Using pytest.skip outside of a test is not allowed*") def test_module_level_skip_with_allow_module_level(testdir): """ Verify that using pytest.skip(allow_module_level=True) is allowed """ testdir.makepyfile( """ import pytest pytest.skip("skip_module_level", allow_module_level=True) def test_func(): assert 0 """ ) result = testdir.runpytest("-rxs") result.stdout.fnmatch_lines("*SKIP*skip_module_level") def test_invalid_skip_keyword_parameter(testdir): """ Verify that using pytest.skip() with unknown parameter raises an error """ testdir.makepyfile( """ import pytest pytest.skip("skip_module_level", unknown=1) def test_func(): assert 0 """ ) result = testdir.runpytest() result.stdout.fnmatch_lines("*TypeError:*['unknown']*") def test_mark_xfail_item(testdir): # Ensure pytest.mark.xfail works with non-Python Item testdir.makeconftest( """ import pytest class MyItem(pytest.Item): nodeid = 'foo' def setup(self): marker = pytest.mark.xfail(True, reason="Expected failure") self.add_marker(marker) def runtest(self): assert False def pytest_collect_file(path, parent): return MyItem("foo", parent) """ ) result = testdir.inline_run() passed, skipped, failed = result.listoutcomes() assert not failed xfailed = [r for r in skipped if hasattr(r, "wasxfail")] assert xfailed def test_summary_list_after_errors(testdir): """Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting.""" testdir.makepyfile( """ 
import pytest def test_fail(): assert 0 """ ) result = testdir.runpytest("-ra") result.stdout.fnmatch_lines( [ "=* FAILURES *=", "*= short test summary info =*", "FAIL test_summary_list_after_errors.py::test_fail", ] ) """SCons.Tool.sgilink Tool-specific initialization for the SGI MIPSPro linker on SGI. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/sgilink.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo" import SCons.Util import link linkers = ['CC', 'cc'] def generate(env): """Add Builders and construction variables for MIPSPro to an Environment.""" link.generate(env) env['LINK'] = env.Detect(linkers) or 'cc' env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared') # __RPATH is set to $_RPATH in the platform specification if that # platform supports it. 
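    # Illustration (hypothetical value, not part of the original tool): with
    # env['RPATH'] = ['/opt/lib'], the _RPATH template below expands to
    # '-rpath /opt/lib', which platforms that define __RPATH splice into
    # the MIPSPro link line.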
env['RPATHPREFIX'] = '-rpath ' env['RPATHSUFFIX'] = '' env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}' def exists(env): return env.Detect(linkers) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4: # -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Stratum' db.create_table('cvmfsmon_stratum', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200)), ('alias', self.gf('django.db.models.fields.CharField')(max_length=20)), ('level', self.gf('django.db.models.fields.IntegerField')()), )) db.send_create_signal('cvmfsmon', ['Stratum']) # Adding unique constraint on 'Stratum', fields ['alias', 'level'] db.create_unique('cvmfsmon_stratum', ['alias', 'level']) # Adding model 'Repository' db.create_table('cvmfsmon_repository', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('fqrn', self.gf('django.db.models.fields.CharField')(max_length=100)), ('project_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)), ('project_description', self.gf('django.db.models.fields.TextField')(blank=True)), ('stratum0', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stratum0', to=orm['cvmfsmon.Stratum'])), )) db.send_create_signal('cvmfsmon', ['Repository']) # Adding M2M table for field stratum1s on 'Repository' m2m_table_name = db.shorten_name('cvmfsmon_repository_stratum1s') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('repository', models.ForeignKey(orm['cvmfsmon.repository'], null=False)), ('stratum', models.ForeignKey(orm['cvmfsmon.stratum'], null=False)) )) db.create_unique(m2m_table_name, ['repository_id', 'stratum_id']) def backwards(self, orm): # Removing unique constraint on 'Stratum', fields ['alias', 'level'] db.delete_unique('cvmfsmon_stratum', ['alias', 'level']) # Deleting model 'Stratum' db.delete_table('cvmfsmon_stratum') # Deleting model 'Repository' db.delete_table('cvmfsmon_repository') # Removing M2M table for field stratum1s on 'Repository' db.delete_table(db.shorten_name('cvmfsmon_repository_stratum1s')) models = { 'cvmfsmon.repository': { 'Meta': {'object_name': 'Repository'}, 'fqrn': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'project_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'project_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}), 'stratum0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stratum0'", 'to': "orm['cvmfsmon.Stratum']"}), 'stratum1s': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'stratum1s'", 'symmetrical': 'False', 'to': "orm['cvmfsmon.Stratum']"}) }, 'cvmfsmon.stratum': { 'Meta': {'unique_together': "(('alias', 'level'),)", 'object_name': 'Stratum'}, 'alias': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.IntegerField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) } } complete_apps = ['cvmfsmon'] import pytest from flexget.task import TaskAbort class TestQualityPriority: config = """ tasks: test_reorder_quality: mock: - {title: 'Some Show S01E01 WEBRip'} - {title: 'Some Show S01E01 HDTV'} reorder_quality: webrip: above: hdtv sort_by: field: quality reverse: yes test_normal_quality_priority: mock: - {title: 'Some Show S01E02 WEBRip'} - {title: 'Some Show S01E02 HDTV'} sort_by: field: quality reverse: yes test_invalid_reorder_quality: reorder_quality: h264: above: hdtv """ def test_reorder_quality(self, execute_task): task = execute_task('test_reorder_quality') assert ( task.all_entries[0]['title'] == 'Some Show S01E01 WEBRip' ), 'WEBRip should have been accepted' task = execute_task('test_normal_quality_priority') assert ( task.all_entries[0]['title'] == 'Some Show S01E02 HDTV' ), 'HDTV should have been accepted' def test_invalid_reorder_quality(self, execute_task): with pytest.raises(TaskAbort) as e: execute_task('test_invalid_reorder_quality') assert e.value.reason == 'h264=codec and hdtv=source do not have the same quality type' # -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2016 CERN. # # Zenodo is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Zenodo is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Zenodo; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """JSON schemas compiler for Zenodo.""" from __future__ import absolute_import, print_function from . import config from .cli import jsonschemas class ZenodoJSONSchemas(object): """Zenodo records extension.""" def __init__(self, app=None): """Extension initialization.""" if app: self.init_app(app) def init_app(self, app): """Flask application initialization.""" self.init_config(app) app.extensions['zenodo-jsonschemas'] = self app.cli.add_command(jsonschemas) @staticmethod def init_config(app): """Initialize configuration.""" for k in dir(config): if k.startswith('ZENODO_JSONSCHEMAS_'): app.config.setdefault(k, getattr(config, k)) #!/usr/bin/python # # Cpu task migration overview toy # # Copyright (C) 2010 Frederic Weisbecker # # perf script event handlers have been generated by perf script -g python # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. 
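# Usage sketch (an assumption based on the header above, not part of the
# original script): record sched events system-wide, then replay them
# through perf's Python scripting engine:
#
#   perf record -a -e 'sched:*' -- sleep 10
#   perf script -s sched-migration.py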
import os import sys from collections import defaultdict from UserList import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from SchedGui import * threads = { 0 : "idle"} def thread_name(pid): return "%s:%d" % (threads[pid], pid) class RunqueueEventUnknown: @staticmethod def color(): return None def __repr__(self): return "unknown" class RunqueueEventSleep: @staticmethod def color(): return (0, 0, 0xff) def __init__(self, sleeper): self.sleeper = sleeper def __repr__(self): return "%s gone to sleep" % thread_name(self.sleeper) class RunqueueEventWakeup: @staticmethod def color(): return (0xff, 0xff, 0) def __init__(self, wakee): self.wakee = wakee def __repr__(self): return "%s woke up" % thread_name(self.wakee) class RunqueueEventFork: @staticmethod def color(): return (0, 0xff, 0) def __init__(self, child): self.child = child def __repr__(self): return "new forked task %s" % thread_name(self.child) class RunqueueMigrateIn: @staticmethod def color(): return (0, 0xf0, 0xff) def __init__(self, new): self.new = new def __repr__(self): return "task migrated in %s" % thread_name(self.new) class RunqueueMigrateOut: @staticmethod def color(): return (0xff, 0, 0xff) def __init__(self, old): self.old = old def __repr__(self): return "task migrated out %s" % thread_name(self.old) class RunqueueSnapshot: def __init__(self, tasks = [0], event = RunqueueEventUnknown()): self.tasks = tuple(tasks) self.event = event def sched_switch(self, prev, prev_state, next): event = RunqueueEventUnknown() if taskState(prev_state) == "R" and next in self.tasks \ and prev in self.tasks: return self if taskState(prev_state) != "R": event = RunqueueEventSleep(prev) next_tasks = list(self.tasks[:]) if prev in self.tasks: if taskState(prev_state) != "R": next_tasks.remove(prev) elif taskState(prev_state) == "R": next_tasks.append(prev) if next not in next_tasks: next_tasks.append(next) return RunqueueSnapshot(next_tasks, event) def migrate_out(self, old): if old not in self.tasks: return self next_tasks = [task for task in self.tasks if task != old] return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old)) def __migrate_in(self, new, event): if new in self.tasks: self.event = event return self next_tasks = self.tasks[:] + tuple([new]) return RunqueueSnapshot(next_tasks, event) def migrate_in(self, new): return self.__migrate_in(new, RunqueueMigrateIn(new)) def wake_up(self, new): return self.__migrate_in(new, RunqueueEventWakeup(new)) def wake_up_new(self, new): return self.__migrate_in(new, RunqueueEventFork(new)) def load(self): """ Provide the number of tasks on the runqueue. 
Don't count idle""" return len(self.tasks) - 1 def __repr__(self): ret = self.tasks.__repr__() ret += self.origin_tostring() return ret class TimeSlice: def __init__(self, start, prev): self.start = start self.prev = prev self.end = start # cpus that triggered the event self.event_cpus = [] if prev is not None: self.total_load = prev.total_load self.rqs = prev.rqs.copy() else: self.rqs = defaultdict(RunqueueSnapshot) self.total_load = 0 def __update_total_load(self, old_rq, new_rq): diff = new_rq.load() - old_rq.load() self.total_load += diff def sched_switch(self, ts_list, prev, prev_state, next, cpu): old_rq = self.prev.rqs[cpu] new_rq = old_rq.sched_switch(prev, prev_state, next) if old_rq is new_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def migrate(self, ts_list, new, old_cpu, new_cpu): if old_cpu == new_cpu: return old_rq = self.prev.rqs[old_cpu] out_rq = old_rq.migrate_out(new) self.rqs[old_cpu] = out_rq self.__update_total_load(old_rq, out_rq) new_rq = self.prev.rqs[new_cpu] in_rq = new_rq.migrate_in(new) self.rqs[new_cpu] = in_rq self.__update_total_load(new_rq, in_rq) ts_list.append(self) if old_rq is not out_rq: self.event_cpus.append(old_cpu) self.event_cpus.append(new_cpu) def wake_up(self, ts_list, pid, cpu, fork): old_rq = self.prev.rqs[cpu] if fork: new_rq = old_rq.wake_up_new(pid) else: new_rq = old_rq.wake_up(pid) if new_rq is old_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def next(self, t): self.end = t return TimeSlice(t, self) class TimeSliceList(UserList): def __init__(self, arg = []): self.data = arg def get_time_slice(self, ts): if len(self.data) == 0: slice = TimeSlice(ts, TimeSlice(-1, None)) else: slice = self.data[-1].next(ts) return slice def find_time_slice(self, ts): start = 0 end = len(self.data) found = -1 searching = True while searching: if start == end or start == end - 1: searching = False i = (end + start) / 2 if self.data[i].start <= ts and self.data[i].end >= ts: found = i end = i continue if self.data[i].end < ts: start = i elif self.data[i].start > ts: end = i return found def set_root_win(self, win): self.root_win = win def mouse_down(self, cpu, t): idx = self.find_time_slice(t) if idx == -1: return ts = self[idx] rq = ts.rqs[cpu] raw = "CPU: %d\n" % cpu raw += "Last event : %s\n" % rq.event.__repr__() raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000) raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6)) raw += "Load = %d\n" % rq.load() for t in rq.tasks: raw += "%s \n" % thread_name(t) self.root_win.update_summary(raw) def update_rectangle_cpu(self, slice, cpu): rq = slice.rqs[cpu] if slice.total_load != 0: load_rate = rq.load() / float(slice.total_load) else: load_rate = 0 red_power = int(0xff - (0xff * load_rate)) color = (0xff, red_power, red_power) top_color = None if cpu in slice.event_cpus: top_color = rq.event.color() self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end) def fill_zone(self, start, end): i = self.find_time_slice(start) if i == -1: return for i in xrange(i, len(self.data)): timeslice = self.data[i] if timeslice.start > end: return for cpu in timeslice.rqs: self.update_rectangle_cpu(timeslice, cpu) def interval(self): if len(self.data) == 0: return (0, 0) return (self.data[0].start, self.data[-1].end) def nr_rectangles(self): last_ts = self.data[-1] max_cpu = 0 for cpu in last_ts.rqs: if cpu > 
max_cpu: max_cpu = cpu return max_cpu class SchedEventProxy: def __init__(self): self.current_tsk = defaultdict(lambda : -1) self.timeslices = TimeSliceList() def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): """ Ensure the task we sched out this cpu is really the one we logged. Otherwise we may have missed traces """ on_cpu_task = self.current_tsk[headers.cpu] if on_cpu_task != -1 and on_cpu_task != prev_pid: print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) threads[prev_pid] = prev_comm threads[next_pid] = next_comm self.current_tsk[headers.cpu] = next_pid ts = self.timeslices.get_time_slice(headers.ts()) ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu) def migrate(self, headers, pid, prio, orig_cpu, dest_cpu): ts = self.timeslices.get_time_slice(headers.ts()) ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu) def wake_up(self, headers, comm, pid, success, target_cpu, fork): if success == 0: return ts = self.timeslices.get_time_slice(headers.ts()) ts.wake_up(self.timeslices, pid, target_cpu, fork) def trace_begin(): global parser parser = SchedEventProxy() def trace_end(): app = wx.App(False) timeslices = parser.timeslices frame = RootFrame(timeslices, "Migration") app.MainLoop() def sched__sched_stat_runtime(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, runtime, vruntime): pass def sched__sched_stat_iowait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, delay): pass def sched__sched_stat_sleep(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, delay): pass def sched__sched_stat_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, delay): pass def sched__sched_process_fork(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, parent_comm, parent_pid, child_comm, child_pid): pass def sched__sched_process_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_process_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_process_free(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_migrate_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, orig_cpu, dest_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.migrate(headers, pid, prio, orig_cpu, dest_cpu) def sched__sched_switch(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio) def sched__sched_wakeup_new(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, 
common_callchain, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.wake_up(headers, comm, pid, success, target_cpu, 1) def sched__sched_wakeup(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.wake_up(headers, comm, pid, success, target_cpu, 0) def sched__sched_wait_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_kthread_stop_ret(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, ret): pass def sched__sched_kthread_stop(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid): pass def trace_unhandled(event_name, context, event_fields_dict): pass """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. # # Authors: # Matthew Wedgwood import subprocess import pwd import grp import os import re import shlex import yaml from charmhelpers.core.hookenv import ( config, local_unit, log, relation_ids, relation_set, ) from charmhelpers.core.host import service # This module adds compatibility with the nrpe-external-master and plain nrpe # subordinate charms. To use it in your charm: # # 1. Update metadata.yaml # # provides: # (...) # nrpe-external-master: # interface: nrpe-external-master # scope: container # # and/or # # provides: # (...) # local-monitors: # interface: local-monitors # scope: container # # 2. Add the following to config.yaml # # nagios_context: # default: "juju" # type: string # description: | # Used by the nrpe subordinate charms. # A string that will be prepended to instance name to set the host name # in nagios. So for instance the hostname would be something like: # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. # # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master # # 4. Update your hooks.py with something like this: # # from charmsupport.nrpe import NRPE # (...) # def update_nrpe_config(): # nrpe_compat = NRPE() # nrpe_compat.add_check( # shortname = "myservice", # description = "Check MyService", # check_cmd = "check_http -w 2 -c 10 http://localhost" # ) # nrpe_compat.add_check( # "myservice_other", # "Check for widget failures", # check_cmd = "/srv/myapp/scripts/widget_check" # ) # nrpe_compat.write() # # def config_changed(): # (...) # update_nrpe_config() # # def nrpe_external_master_relation_changed(): # update_nrpe_config() # # def local_monitors_relation_changed(): # update_nrpe_config() # # 5. 
ln -s hooks.py nrpe-external-master-relation-changed # ln -s hooks.py local-monitors-relation-changed class CheckException(Exception): pass class Check(object): shortname_re = '[A-Za-z0-9-_]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed #--------------------------------------------------- define service {{ use active-service host_name {nagios_hostname} service_description {nagios_hostname}[{shortname}] """ """{description} check_command check_nrpe!{command} servicegroups {nagios_servicegroup} }} """) def __init__(self, shortname, description, check_cmd): super(Check, self).__init__() # XXX: could be better to calculate this from the service name if not re.match(self.shortname_re, shortname): raise CheckException("shortname must match {}".format( Check.shortname_re)) self.shortname = shortname self.command = "check_{}".format(shortname) # Note: a set of invalid characters is defined by the # Nagios server config # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= self.description = description self.check_cmd = self._locate_cmd(check_cmd) def _locate_cmd(self, check_cmd): search_path = ( '/usr/lib/nagios/plugins', '/usr/local/lib/nagios/plugins', ) parts = shlex.split(check_cmd) for path in search_path: if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: command += " " + " ".join(parts[1:]) return command log('Check command not found: {}'.format(parts[0])) return '' def write(self, nagios_context, hostname): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: nrpe_check_config.write("# check {}\n".format(self.shortname)) nrpe_check_config.write("command[{}]={}\n".format( self.command, self.check_cmd)) if not os.path.exists(NRPE.nagios_exportdir): log('Not writing service config as {} is not accessible'.format( NRPE.nagios_exportdir)) else: self.write_service_config(nagios_context, hostname) def write_service_config(self, nagios_context, hostname): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_context, 'description': self.description, 'shortname': self.shortname, 'command': self.command, } nrpe_service_text = Check.service_template.format(**templ_vars) nrpe_service_file = '{}/service__{}_{}.cfg'.format( NRPE.nagios_exportdir, hostname, self.command) with open(nrpe_service_file, 'w') as nrpe_service_config: nrpe_service_config.write(str(nrpe_service_text)) def run(self): subprocess.call(self.check_cmd) class NRPE(object): nagios_logdir = '/var/log/nagios' nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname else: self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): self.checks.append(Check(*args, **kwargs)) def write(self): try: nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_gid = grp.getgrnam('nagios').gr_gid except: log("Nagios user not set up, nrpe checks not updated") return if not os.path.exists(NRPE.nagios_logdir): os.mkdir(NRPE.nagios_logdir) os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) 
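        # The rest of write() renders one nrpe config file per registered
        # check, collects the checks into a monitors dict, restarts the
        # NRPE daemon, and publishes the dict on any local-monitors
        # relations (see relation_set below).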
nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} for nrpecheck in self.checks: nrpecheck.write(self.nagios_context, self.hostname) nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } service('restart', 'nagios-nrpe-server') for rid in relation_ids("local-monitors"): relation_set(relation_id=rid, monitors=yaml.dump(monitors)) #! /usr/bin/env python """Hook to generate a CSV file summarising the schedule so that we can easily see which sessions have changed and who needs to be notified. There's a lot of copying and pasting from Kev's guidebook.py. """ import codecs import csv import cStringIO import io import os from flat_schedule import mkdirs, read_html_tabular_schedule EVENT_TYPES = {'demo', 'workshop', 'talk', 'panel'} class UnicodeWriter(object): # https://docs.python.org/2.7/library/csv.html#writer-objects def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds): self.queue = cStringIO.StringIO() self.writer = csv.writer(self.queue, dialect=dialect, **kwds) self.stream = f self.encoder = codecs.getincrementalencoder(encoding)() def writerow(self, row): self.writer.writerow([s.encode('utf-8') for s in row]) data = self.queue.getvalue() data = data.decode('utf-8') data = self.encoder.encode(data) self.stream.write(data) self.queue.truncate(0) def writerows(self, rows): for row in rows: self.writerow(row) def write_summary_schedule(schedule, config): schedule_dir = os.path.join(config['output_dir'], 'schedule', 'summary') schedule_path = os.path.join(schedule_dir, 'summary.csv') mkdirs(schedule_dir) headings = ['Title', 'Speaker', 'Date', 'Time', 'Room'] with io.open(schedule_path, 'wb') as csvfile: writer = UnicodeWriter(csvfile) writer.writerow(headings) for talk in schedule: writer.writerow(make_row(talk, config)) def make_row(talk, config): title = talk['title'] speaker = extract_speaker(talk, config) date = talk['start'].strftime('%Y-%m-%d') time = talk['start'].strftime('%H:%M') room = talk['location'] return [title, speaker, date, time, room] def extract_speaker(talk, config): if talk['type'] in EVENT_TYPES: path = os.path.join(config['content_dir'], talk['href'].strip('/') + '.md') with open(path) as f: for line in f: line = line.decode('utf-8') if line[:4] == '### ': return line[4:].strip() return '' def create_summary_schedule(config): schedule = read_html_tabular_schedule(config) write_summary_schedule(schedule, config) if __name__ == '__main__': config = { 'template_dir': 'templates', 'output_dir': 'output', 'content_dir': 'content' } create_summary_schedule(config) #!/usr/bin/env python # -*- coding: utf-8 -*- ## This utility takes the debian directory from an unpacked debian mono source ## tree (e.g. apt-get source mono), parses the *.install files and generates a ## bitbake include file with the file and package lists. It tries to handle -dbg ## packages by creating additional glob patterns for *.mdb and */.debug/*. Most ## of these will not match, but that's fine (I think). ## -- Henryk Plötz ## ##The output looks like: ##FILES_mono-jit-dbg = "/usr/bin/mono*.mdb \ ## /usr/bin/mono*/*.mdb" ##FILES_mono-jit = "/usr/bin/mono" ##FILES_mono-gac-dbg = "/usr/bin/gacutil*.mdb \ ## /usr/bin/gacutil*/*.mdb \ ## /usr/lib/mono/1.0/gacutil.exe*.mdb \ ## /usr/lib/mono/1.0/gacutil.exe*/*.mdb" ##FILES_mono-gac = "/usr/bin/gacutil \ ## /usr/lib/mono/1.0/gacutil.exe" ## ... ##PACKAGES = "mono-jit-dbg \ ## mono-jit \ ## mono-gac-dbg \ ## mono-gac \ ## ... 
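## Usage sketch (an assumption, based on the __main__ block below): run the
## script from the unpacked source's debian/ directory and capture stdout,
## e.g.:
##   cd mono-X.Y/debian && python collect-files.py > mono-files.inc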
import os, sys, re def collect_paths(dir): paths = {} os.chdir(dir) for filename in os.listdir("."): if filename.endswith(".install"): fp = file(filename, "r") lines = fp.readlines() fp.close() contents = [] for line in lines: line = line.strip() if line.startswith("#"): continue if line == "": continue lineparts = line.split() if lineparts[0].startswith("debian/tmp"): pattern = lineparts[0][ len("debian/tmp"): ] if len(lineparts) == 2: if not pattern.startswith(lineparts[1]): print >>sys.stderr, "Warning: Apparently I don't fully understand the format in file %s" % filename elif len(lineparts) > 2: print >>sys.stderr, "Warning: Apparently I don't fully understand the format in file %s" % filename contents.append( pattern ) else: print >>sys.stderr, "Note: Ignoring %s in %s" % (lineparts, filename) paths[ filename[ :-len(".install") ] ] = contents return paths def collect_packages(paths): # These packages should be populated first (e.g. because their files will otherwise end up # in other packages) PACKAGES_FIRST = ("mono-jit", "mono-gac", "mono-mjs", "mono-gmcs", "mono-utils", "mono-doc") # These should be populated last (because their spec is very broad) PACKAGES_LAST = ("mono-mcs", "libmono-system1.0-cil", "libmono-system2.0-cil", "libmono1.0-cil", "libmono2.0-cil") first = [] last = [] packages = paths.keys() for packagename in PACKAGES_FIRST + PACKAGES_LAST: if packagename in packages: packages.remove(packagename) if packagename in PACKAGES_FIRST: first.append(packagename) else: last.append(packagename) packagenames = first + packages + last return packagenames, paths def debugify(packagenames, paths): pnames = [] for pkg in packagenames: if not pkg.endswith("-dbg"): result = [] for path in paths[pkg]: if not path.endswith("*"): result.append(path + "*.mdb") result.append(path + "*/*.mdb") else: result.append(path + ".mdb") result.append(path + "/*.mdb") if path.endswith("/"): result.append(path + ".debug/") result.append(path + "../.debug/") paths[pkg + "-dbg"] = result pnames.append(pkg + "-dbg") pnames.append(pkg) return pnames, paths if __name__ == "__main__": packagenames, paths = collect_packages( collect_paths(".") ) packagenames, paths = debugify(packagenames, paths) print "# This is a generated file, please do not edit directly" print "# Use collect-files.py instead. 
-- Henryk " packages = [] for pkg in packagenames: if not paths[pkg]: continue print 'FILES_%s = "%s"' % (pkg, " \\\n\t".join(paths[pkg])) packages.append(pkg) print print 'PACKAGES = "%s"' % (" \\\n\t".join(packages)) #!/usr/bin/env python #coding: utf-8 from collections import defaultdict import sys import datetime result = defaultdict(lambda : defaultdict(lambda :defaultdict(set))) def print_result(): for date_key in result.iterkeys(): year, month, req_type = date_key.split('_') for from_country in result[date_key].iterkeys(): for req_country in result[date_key][from_country].iterkeys(): print '{};{:02d};{};{};{};{}'.format(year,int(month),from_country,req_country,req_type,len(result[date_key][from_country][req_country])) try: with sys.stdin as file: for rec in file: try: parts = rec.strip().split('|') req_type = 'R' if len(parts) == 6 and parts[5]=='.routing' else 'M' from_country = parts[0] date = datetime.datetime.strptime(parts[2], '%d/%b/%Y:%H:%M:%S') user_id = parts[3] req_country = parts[4].split('_')[0] date_key = '{}_{}_{}'.format(date.year,date.month,req_type) user_key = '{}_{}'.format(user_id,req_country) result[date_key][from_country][req_country].add(user_key) except: pass # ignore all errors for one string except KeyboardInterrupt: print_result() exit(0) except: print_result() raise print_result() # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('browser', '0003_auto_20150320_0253'), ] operations = [ migrations.CreateModel( name='ROnline', fields=[ ('resource_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='browser.Resource')), ('otype', models.CharField(max_length=1, choices=[(b'0', b'video'), (b'1', b'article'), (b'2', b'web site')])), ('date', models.DateTimeField()), ('url', models.TextField(blank=True)), ], options={ }, bases=('browser.resource',), ), migrations.CreateModel( name='SServices', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('diagnostic', models.BooleanField(default=False)), ('resource', models.BooleanField(default=False)), ('therapy', models.BooleanField(default=False)), ('educational', models.BooleanField(default=False)), ('referral', models.BooleanField(default=False)), ('legal', models.BooleanField(default=False)), ('city', models.CharField(max_length=30)), ('resourceLink', models.ForeignKey(to='browser.Resource')), ], options={ }, bases=(models.Model,), ), migrations.DeleteModel( name='SService', ), ] # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import wx import armid from BasePanel import BasePanel import ConceptReference from Borg import Borg class ConceptReferencePanel(BasePanel): def __init__(self,parent): BasePanel.__init__(self,parent,armid.CONCEPTREFERENCE_ID) self.theId = None b = Borg() self.dbProxy = b.dbProxy def buildControls(self,isCreate,isUpdateable=True): mainSizer = wx.BoxSizer(wx.VERTICAL) mainSizer.Add(self.buildTextSizer('Name',(87,30),armid.CONCEPTREFERENCE_TEXTNAME_ID),0,wx.EXPAND) dims = ['asset','attacker','countermeasure','domainproperty','environment','goal','misusecase','obstacle','persona','requirement','response','risk','role','task','threat','vulnerability'] mainSizer.Add(self.buildComboSizerList('Concept',(87,30),armid.CONCEPTREFERENCE_COMBODIMNAME_ID,dims),0,wx.EXPAND) mainSizer.Add(self.buildComboSizerList('Object',(87,30),armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID,[]),0,wx.EXPAND) mainSizer.Add(self.buildMLTextSizer('Description',(87,30),armid.CONCEPTREFERENCE_TEXTDESCRIPTION_ID),1,wx.EXPAND) mainSizer.Add(self.buildCommitButtonSizer(armid.CONCEPTREFERENCE_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER) wx.EVT_COMBOBOX(self,armid.CONCEPTREFERENCE_COMBODIMNAME_ID,self.onDimensionChange) self.SetSizer(mainSizer) def loadControls(self,objt,isReadOnly=False): self.theId = objt.id() nameCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_TEXTNAME_ID) dimCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBODIMNAME_ID) objtCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID) descCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_TEXTDESCRIPTION_ID) nameCtrl.SetValue(objt.name()) dimCtrl.SetValue(objt.dimension()) objtCtrl.SetValue(objt.objectName()) descCtrl.SetValue(objt.description()) def onDimensionChange(self,evt): dimName = evt.GetString() objts = self.dbProxy.getDimensionNames(dimName) objtCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID) objtCtrl.SetItems(objts) """ Dummy database backend for Django. Django uses this if the database ENGINE setting is empty (None or empty string). Each of these API functions, except connection.close(), raises ImproperlyConfigured. """ from django.core.exceptions import ImproperlyConfigured from django.db.backends import * from django.db.backends.creation import BaseDatabaseCreation def complain(*args, **kwargs): raise ImproperlyConfigured("settings.DATABASES is improperly configured. " "Please supply the ENGINE value. Check " "settings documentation for more details.") def ignore(*args, **kwargs): pass class DatabaseError(Exception): pass class IntegrityError(DatabaseError): pass class DatabaseOperations(BaseDatabaseOperations): quote_name = complain class DatabaseClient(BaseDatabaseClient): runshell = complain class DatabaseCreation(BaseDatabaseCreation): create_test_db = ignore destroy_test_db = ignore class DatabaseIntrospection(BaseDatabaseIntrospection): get_table_list = complain get_table_description = complain get_relations = complain get_indexes = complain get_key_columns = complain class DatabaseWrapper(BaseDatabaseWrapper): operators = {} # Override the base class implementations with null # implementations. Anything that tries to actually # do something raises complain; anything that tries # to rollback or undo something raises ignore. 
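    # (complain raises ImproperlyConfigured as soon as it is called, while
    # ignore simply returns None. For example:
    #
    #     from django.db import connection
    #     connection.close()    # silently does nothing: close = ignore
    #     connection.cursor()   # raises ImproperlyConfigured: cursor = complain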
    _commit = complain
    _rollback = ignore
    enter_transaction_management = complain
    leave_transaction_management = ignore
    set_dirty = complain
    set_clean = complain
    commit_unless_managed = complain
    rollback_unless_managed = ignore
    savepoint = ignore
    savepoint_commit = complain
    savepoint_rollback = ignore
    close = ignore
    cursor = complain

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = BaseDatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
import logging
import os

from openpyxl import Workbook

logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s \t', level=logging.INFO)
EXCEL_DIR = '/home/lucasx/PycharmProjects/DataHouse/DataSet/'


def write_excel(list_, filename):
    mkdirs_if_not_exists(EXCEL_DIR)
    wb = Workbook()
    ws = wb.active
    ws.title = "HouseInfo"
    ws.cell(row=1, column=1).value = 'address'
    ws.cell(row=1, column=2).value = 'area'
    ws.cell(row=1, column=3).value = 'block'
    ws.cell(row=1, column=4).value = 'buildYear'
    ws.cell(row=1, column=5).value = 'image'
    ws.cell(row=1, column=6).value = 'midPrice'
    ws.cell(row=1, column=7).value = 'name'
    ws.cell(row=1, column=8).value = 'saleNum'
    ws.cell(row=1, column=9).value = 'url'
    rownum = 2
    for each_item in list_:
        ws.cell(row=rownum, column=1).value = each_item.address
        ws.cell(row=rownum, column=2).value = each_item.area
        ws.cell(row=rownum, column=3).value = each_item.block
        ws.cell(row=rownum, column=4).value = each_item.buildYear
        ws.cell(row=rownum, column=5).value = each_item.image
        ws.cell(row=rownum, column=6).value = each_item.midPrice
        ws.cell(row=rownum, column=7).value = each_item.name
        ws.cell(row=rownum, column=8).value = each_item.saleNum
        ws.cell(row=rownum, column=9).value = each_item.url
        rownum += 1
    wb.save(EXCEL_DIR + filename + '.xlsx')
    logging.info('Excel file generated successfully!')


def mkdirs_if_not_exists(directory_):
    """create a new folder if it does not exist"""
    if not os.path.exists(directory_) or not os.path.isdir(directory_):
        os.makedirs(directory_)
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Export the Python grammar and symbols."""

# Python imports
import os

# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree

# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")


class Symbols(object):

    def __init__(self, grammar):
        """Initializer.

        Creates an attribute for each grammar symbol (nonterminal),
        whose value is the symbol's type (an int >= 256).
        """
        for name, symbol in grammar.symbol2number.iteritems():
            setattr(self, name, symbol)


python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)

python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]

pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL ().
# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round from openerp import SUPERUSER_ID from dateutil.relativedelta import relativedelta from datetime import datetime from psycopg2 import OperationalError import openerp class procurement_group(osv.osv): _inherit = 'procurement.group' _columns = { 'partner_id': fields.many2one('res.partner', 'Partner') } class procurement_rule(osv.osv): _inherit = 'procurement.rule' def _get_action(self, cr, uid, context=None): result = super(procurement_rule, self)._get_action(cr, uid, context=context) return result + [('move', _('Move From Another Location'))] def _get_rules(self, cr, uid, ids, context=None): res = [] for route in self.browse(cr, uid, ids): res += [x.id for x in route.pull_ids] return res _columns = { 'location_id': fields.many2one('stock.location', 'Procurement Location'), 'location_src_id': fields.many2one('stock.location', 'Source Location', help="Source location is action=move"), 'route_id': fields.many2one('stock.location.route', 'Route', help="If route_id is False, the rule is global"), 'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True, help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""), 'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence', store={ 'stock.location.route': (_get_rules, ['sequence'], 10), 'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10), }), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', help="Picking Type determines the way the picking should be shown in the view, reports, ..."), 'delay': fields.integer('Number of Days'), 'partner_address_id': fields.many2one('res.partner', 'Partner Address'), 'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too'), 'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'), 'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"), } _defaults = { 'procure_method': 'make_to_stock', 'propagate': True, 'delay': 0, } class procurement_order(osv.osv): _inherit = "procurement.order" _columns = 
{ 'location_id': fields.many2one('stock.location', 'Procurement Location'), # not required because task may create procurements that aren't linked to a location with sale_service 'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"), 'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"), 'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"), 'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"), 'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'), } def propagate_cancel(self, cr, uid, procurement, context=None): if procurement.rule_id.action == 'move' and procurement.move_ids: self.pool.get('stock.move').action_cancel(cr, uid, [m.id for m in procurement.move_ids], context=context) def cancel(self, cr, uid, ids, context=None): if context is None: context = {} to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context) ctx = context.copy() #set the context for the propagation of the procurement cancelation ctx['cancel_procurement'] = True for procurement in self.browse(cr, uid, to_cancel_ids, context=ctx): self.propagate_cancel(cr, uid, procurement, context=ctx) return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx) def _find_parent_locations(self, cr, uid, procurement, context=None): location = procurement.location_id res = [location.id] while location.location_id: location = location.location_id res.append(location.id) return res def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None): if warehouse_id: warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context) return {'value': {'location_id': warehouse.lot_stock_id.id}} return {} def _search_suitable_rule(self, cr, uid, procurement, domain, context=None): '''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior''' pull_obj = self.pool.get('procurement.rule') warehouse_route_ids = [] if procurement.warehouse_id: domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)] warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids] product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids] procurement_route_ids = [x.id for x in procurement.route_ids] res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context) if not res: res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context) if not res: res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or [] if not res: res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context) return 
res def _find_suitable_rule(self, cr, uid, procurement, context=None): rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context) if not rule_id: #a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A' all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context) rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context) rule_id = rule_id and rule_id[0] or False return rule_id def _run_move_create(self, cr, uid, procurement, context=None): ''' Returns a dictionary of values that will be used to create a stock move from a procurement. This function assumes that the given procurement has a rule (action == 'move') set on it. :param procurement: browse record :rtype: dictionary ''' newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S') group_id = False if procurement.rule_id.group_propagation_option == 'propagate': group_id = procurement.group_id and procurement.group_id.id or False elif procurement.rule_id.group_propagation_option == 'fixed': group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False #it is possible that we've already got some move done, so check for the done qty and create #a new move with the correct qty already_done_qty = 0 already_done_qty_uos = 0 for move in procurement.move_ids: already_done_qty += move.product_uom_qty if move.state == 'done' else 0 already_done_qty_uos += move.product_uos_qty if move.state == 'done' else 0 qty_left = max(procurement.product_qty - already_done_qty, 0) qty_uos_left = max(procurement.product_uos_qty - already_done_qty_uos, 0) vals = { 'name': procurement.name, 'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id, 'product_id': procurement.product_id.id, 'product_uom': procurement.product_uom.id, 'product_uom_qty': qty_left, 'product_uos_qty': (procurement.product_uos and qty_uos_left) or qty_left, 'product_uos': (procurement.product_uos and procurement.product_uos.id) or procurement.product_uom.id, 'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False, 'location_id': procurement.rule_id.location_src_id.id, 'location_dest_id': procurement.location_id.id, 'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False, 'procurement_id': procurement.id, 'rule_id': procurement.rule_id.id, 'procure_method': procurement.rule_id.procure_method, 'origin': procurement.origin, 'picking_type_id': procurement.rule_id.picking_type_id.id, 'group_id': group_id, 'route_ids': [(4, x.id) for x in procurement.route_ids], 'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id, 'date': newdate, 'date_expected': newdate, 'propagate': procurement.rule_id.propagate, 'priority': procurement.priority, } return vals def _run(self, cr, uid, procurement, context=None): if procurement.rule_id and procurement.rule_id.action == 'move': if not procurement.rule_id.location_src_id: self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context) return False move_obj = self.pool.get('stock.move') move_dict = self._run_move_create(cr, uid, procurement, context=context) #create the move as 
SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example) move_obj.create(cr, SUPERUSER_ID, move_dict, context=context) return True return super(procurement_order, self)._run(cr, uid, procurement, context=context) def run(self, cr, uid, ids, autocommit=False, context=None): new_ids = [x.id for x in self.browse(cr, uid, ids, context=context) if x.state not in ('running', 'done', 'cancel')] res = super(procurement_order, self).run(cr, uid, new_ids, autocommit=autocommit, context=context) #after all the procurements are run, check if some created a draft stock move that needs to be confirmed #(we do that in batch because it fasts the picking assignation and the picking state computation) move_to_confirm_ids = [] for procurement in self.browse(cr, uid, new_ids, context=context): if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move": move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft'] if move_to_confirm_ids: self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context) return res def _check(self, cr, uid, procurement, context=None): ''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related moves are done/cancel and if the requested quantity is moved. ''' if procurement.rule_id and procurement.rule_id.action == 'move': uom_obj = self.pool.get('product.uom') # In case Phantom BoM splits only into procurements if not procurement.move_ids: return True cancel_test_list = [x.state == 'cancel' for x in procurement.move_ids] done_cancel_test_list = [x.state in ('done', 'cancel') for x in procurement.move_ids] at_least_one_cancel = any(cancel_test_list) all_done_or_cancel = all(done_cancel_test_list) all_cancel = all(cancel_test_list) if not all_done_or_cancel: return False elif all_done_or_cancel and not all_cancel: return True elif all_cancel: self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context) self.write(cr, uid, [procurement.id], {'state': 'cancel'}, context=context) return False return super(procurement_order, self)._check(cr, uid, procurement, context) def do_view_pickings(self, cr, uid, ids, context=None): ''' This function returns an action that display the pickings of the procurements belonging to the same procurement group of given ids. ''' mod_obj = self.pool.get('ir.model.data') act_obj = self.pool.get('ir.actions.act_window') result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings') id = result and result[1] or False result = act_obj.read(cr, uid, [id], context=context)[0] group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id]) result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]" return result def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None): ''' Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules and the availability of moves. This function is intended to be run for all the companies at the same time, so we run functions as SUPERUSER to avoid intercompanies and access rights issues. 
@param self: The object pointer @param cr: The current row, from the database cursor, @param uid: The current user ID for security checks @param ids: List of selected IDs @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement. This is appropriate for batch jobs only. @param context: A standard dictionary for contextual values @return: Dictionary of values ''' super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, company_id=company_id, context=context) if context is None: context = {} try: if use_new_cursor: cr = openerp.registry(cr.dbname).cursor() move_obj = self.pool.get('stock.move') #Minimum stock rules self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=use_new_cursor, company_id=company_id, context=context) #Search all confirmed stock_moves and try to assign them confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='priority desc, date_expected asc', context=context) for x in xrange(0, len(confirmed_ids), 100): move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context) if use_new_cursor: cr.commit() if use_new_cursor: cr.commit() finally: if use_new_cursor: try: cr.close() except Exception: pass return {} def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None): date_planned = start_date + relativedelta(days=orderpoint.product_id.seller_delay or 0.0) return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT) def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None): return { 'name': orderpoint.name, 'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context), 'product_id': orderpoint.product_id.id, 'product_qty': product_qty, 'company_id': orderpoint.company_id.id, 'product_uom': orderpoint.product_uom.id, 'location_id': orderpoint.location_id.id, 'origin': orderpoint.name, 'warehouse_id': orderpoint.warehouse_id.id, 'orderpoint_id': orderpoint.id, 'group_id': orderpoint.group_id.id, } def _product_virtual_get(self, cr, uid, order_point): product_obj = self.pool.get('product.product') return product_obj._product_available(cr, uid, [order_point.product_id.id], context={'location': order_point.location_id.id})[order_point.product_id.id]['virtual_available'] def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id = False, context=None): ''' Create procurement based on Orderpoint :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement. This is appropriate for batch jobs only. 
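        The nightly scheduler above calls it exactly this way::

            self._procure_orderpoint_confirm(cr, SUPERUSER_ID,
                                             use_new_cursor=use_new_cursor,
                                             company_id=company_id,
                                             context=context)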
''' if context is None: context = {} if use_new_cursor: cr = openerp.registry(cr.dbname).cursor() orderpoint_obj = self.pool.get('stock.warehouse.orderpoint') procurement_obj = self.pool.get('procurement.order') dom = company_id and [('company_id', '=', company_id)] or [] orderpoint_ids = orderpoint_obj.search(cr, uid, dom) prev_ids = [] while orderpoint_ids: ids = orderpoint_ids[:100] del orderpoint_ids[:100] for op in orderpoint_obj.browse(cr, uid, ids, context=context): try: prods = self._product_virtual_get(cr, uid, op) if prods is None: continue if float_compare(prods, op.product_min_qty, precision_rounding=op.product_uom.rounding) < 0: qty = max(op.product_min_qty, op.product_max_qty) - prods reste = op.qty_multiple > 0 and qty % op.qty_multiple or 0.0 if float_compare(reste, 0.0, precision_rounding=op.product_uom.rounding) > 0: qty += op.qty_multiple - reste if float_compare(qty, 0.0, precision_rounding=op.product_uom.rounding) <= 0: continue qty -= orderpoint_obj.subtract_procurements(cr, uid, op, context=context) qty_rounded = float_round(qty, precision_rounding=op.product_uom.rounding) if qty_rounded > 0: proc_id = procurement_obj.create(cr, uid, self._prepare_orderpoint_procurement(cr, uid, op, qty_rounded, context=context), context=context) self.check(cr, uid, [proc_id]) self.run(cr, uid, [proc_id]) if use_new_cursor: cr.commit() except OperationalError: if use_new_cursor: orderpoint_ids.append(op.id) cr.rollback() continue else: raise if use_new_cursor: cr.commit() if prev_ids == ids: break else: prev_ids = ids if use_new_cursor: cr.commit() cr.close() return {} # (c) 2012, Michael DeHaan # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ lookup: indexed_items author: Michael DeHaan version_added: "1.3" short_description: rewrites lists to return 'indexed items' description: - use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go - any list given will be transformed with each resulting element having the it's previous position in item.0 and its value in item.1 options: _terms: description: list of items required: True """ EXAMPLES = """ - name: indexed loop demo debug: msg: "at array position {{ item.0 }} there is a value {{ item.1 }}" with_indexed_items: - "{{ some_list }}" """ RETURN = """ _raw: description: - list with each item.0 giving you the postiion and item.1 the value type: list """ from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, variables, **kwargs): if not isinstance(terms, list): raise AnsibleError("with_indexed_items expects a list") items = self._flatten(terms) return list(zip(range(len(items)), items)) import json from psycopg2.extras import Json from django.contrib.postgres import forms, lookups from django.core import exceptions from django.db.models import Field, Transform from django.utils.translation import ugettext_lazy as _ __all__ = ['JSONField'] class JSONField(Field): empty_strings_allowed = False description = _('A JSON object') default_error_messages = { 'invalid': _("Value must be valid JSON."), } def db_type(self, connection): return 'jsonb' def get_transform(self, name): transform = super(JSONField, self).get_transform(name) if transform: return 
transform return KeyTransformFactory(name) def get_prep_value(self, value): if value is not None: return Json(value) return value def get_prep_lookup(self, lookup_type, value): if lookup_type in ('has_key', 'has_keys', 'has_any_keys'): return value if isinstance(value, (dict, list)): return Json(value) return super(JSONField, self).get_prep_lookup(lookup_type, value) def validate(self, value, model_instance): super(JSONField, self).validate(value, model_instance) try: json.dumps(value) except TypeError: raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def value_to_string(self, obj): value = self.value_from_object(obj) return value def formfield(self, **kwargs): defaults = {'form_class': forms.JSONField} defaults.update(kwargs) return super(JSONField, self).formfield(**defaults) JSONField.register_lookup(lookups.DataContains) JSONField.register_lookup(lookups.ContainedBy) JSONField.register_lookup(lookups.HasKey) JSONField.register_lookup(lookups.HasKeys) JSONField.register_lookup(lookups.HasAnyKeys) class KeyTransform(Transform): def __init__(self, key_name, *args, **kwargs): super(KeyTransform, self).__init__(*args, **kwargs) self.key_name = key_name def as_sql(self, compiler, connection): key_transforms = [self.key_name] previous = self.lhs while isinstance(previous, KeyTransform): key_transforms.insert(0, previous.key_name) previous = previous.lhs lhs, params = compiler.compile(previous) if len(key_transforms) > 1: return "{} #> %s".format(lhs), [key_transforms] + params try: int(self.key_name) except ValueError: lookup = "'%s'" % self.key_name else: lookup = "%s" % self.key_name return "%s -> %s" % (lhs, lookup), params class KeyTransformFactory(object): def __init__(self, key_name): self.key_name = key_name def __call__(self, *args, **kwargs): return KeyTransform(self.key_name, *args, **kwargs) #!/usr/bin/env python # # Copyright 2009 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
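# The checks below drive the gtest_shuffle_test_ binary with combinations of
# --gtest_shuffle, --gtest_random_seed=N, --gtest_repeat=N,
# --gtest_filter=PATTERN and --gtest_also_run_disabled_tests, plus the
# GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX environment variables, and compare
# the lists of test names printed for each iteration.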
"""Verifies that test shuffling works.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils # Command to run the gtest_shuffle_test_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_') # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' TEST_FILTER = 'A*.A:A*.B:C*' ALL_TESTS = [] ACTIVE_TESTS = [] FILTERED_TESTS = [] SHARDED_TESTS = [] SHUFFLED_ALL_TESTS = [] SHUFFLED_ACTIVE_TESTS = [] SHUFFLED_FILTERED_TESTS = [] SHUFFLED_SHARDED_TESTS = [] def AlsoRunDisabledTestsFlag(): return '--gtest_also_run_disabled_tests' def FilterFlag(test_filter): return '--gtest_filter=%s' % (test_filter,) def RepeatFlag(n): return '--gtest_repeat=%s' % (n,) def ShuffleFlag(): return '--gtest_shuffle' def RandomSeedFlag(n): return '--gtest_random_seed=%s' % (n,) def RunAndReturnOutput(extra_env, args): """Runs the test program and returns its output.""" environ_copy = os.environ.copy() environ_copy.update(extra_env) return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output def GetTestsForAllIterations(extra_env, args): """Runs the test program and returns a list of test lists. Args: extra_env: a map from environment variables to their values args: command line flags to pass to gtest_shuffle_test_ Returns: A list where the i-th element is the list of tests run in the i-th test iteration. """ test_iterations = [] for line in RunAndReturnOutput(extra_env, args).split('\n'): if line.startswith('----'): tests = [] test_iterations.append(tests) elif line.strip(): tests.append(line.strip()) # 'TestCaseName.TestName' return test_iterations def GetTestCases(tests): """Returns a list of test cases in the given full test names. Args: tests: a list of full test names Returns: A list of test cases from 'tests', in their original order. Consecutive duplicates are removed. 
""" test_cases = [] for test in tests: test_case = test.split('.')[0] if not test_case in test_cases: test_cases.append(test_case) return test_cases def CalculateTestLists(): """Calculates the list of tests run under different flags.""" if not ALL_TESTS: ALL_TESTS.extend( GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0]) if not ACTIVE_TESTS: ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0]) if not FILTERED_TESTS: FILTERED_TESTS.extend( GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0]) if not SHARDED_TESTS: SHARDED_TESTS.extend( GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, [])[0]) if not SHUFFLED_ALL_TESTS: SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations( {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0]) if not SHUFFLED_ACTIVE_TESTS: SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1)])[0]) if not SHUFFLED_FILTERED_TESTS: SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0]) if not SHUFFLED_SHARDED_TESTS: SHUFFLED_SHARDED_TESTS.extend( GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, [ShuffleFlag(), RandomSeedFlag(1)])[0]) class GTestShuffleUnitTest(gtest_test_utils.TestCase): """Tests test shuffling.""" def setUp(self): CalculateTestLists() def testShufflePreservesNumberOfTests(self): self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS)) self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS)) self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS)) self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS)) def testShuffleChangesTestOrder(self): self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS) self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS) self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS, SHUFFLED_FILTERED_TESTS) self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS, SHUFFLED_SHARDED_TESTS) def testShuffleChangesTestCaseOrder(self): self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS), GetTestCases(SHUFFLED_ALL_TESTS)) self.assert_( GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS), GetTestCases(SHUFFLED_ACTIVE_TESTS)) self.assert_( GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS), GetTestCases(SHUFFLED_FILTERED_TESTS)) self.assert_( GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS), GetTestCases(SHUFFLED_SHARDED_TESTS)) def testShuffleDoesNotRepeatTest(self): for test in SHUFFLED_ALL_TESTS: self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test), '%s appears more than once' % (test,)) for test in SHUFFLED_ACTIVE_TESTS: self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test), '%s appears more than once' % (test,)) for test in SHUFFLED_FILTERED_TESTS: self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test), '%s appears more than once' % (test,)) for test in SHUFFLED_SHARDED_TESTS: self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test), '%s appears more than once' % (test,)) def testShuffleDoesNotCreateNewTest(self): for test in SHUFFLED_ALL_TESTS: self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,)) for test in SHUFFLED_ACTIVE_TESTS: self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,)) for test in SHUFFLED_FILTERED_TESTS: self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,)) for test in SHUFFLED_SHARDED_TESTS: self.assert_(test in SHARDED_TESTS, '%s is an invalid test' 
% (test,)) def testShuffleIncludesAllTests(self): for test in ALL_TESTS: self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,)) for test in ACTIVE_TESTS: self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,)) for test in FILTERED_TESTS: self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,)) for test in SHARDED_TESTS: self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,)) def testShuffleLeavesDeathTestsAtFront(self): non_death_test_found = False for test in SHUFFLED_ACTIVE_TESTS: if 'DeathTest.' in test: self.assert_(not non_death_test_found, '%s appears after a non-death test' % (test,)) else: non_death_test_found = True def _VerifyTestCasesDoNotInterleave(self, tests): test_cases = [] for test in tests: [test_case, _] = test.split('.') if test_cases and test_cases[-1] != test_case: test_cases.append(test_case) self.assertEqual(1, test_cases.count(test_case), 'Test case %s is not grouped together in %s' % (test_case, tests)) def testShuffleDoesNotInterleaveTestCases(self): self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS) self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS) self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS) self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS) def testShuffleRestoresOrderAfterEachIteration(self): # Get the test lists in all 3 iterations, using random seed 1, 2, # and 3 respectively. Google Test picks a different seed in each # iteration, and this test depends on the current implementation # picking successive numbers. This dependency is not ideal, but # makes the test much easier to write. [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = ( GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)])) # Make sure running the tests with random seed 1 gets the same # order as in iteration 1 above. [tests_with_seed1] = GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1)]) self.assertEqual(tests_in_iteration1, tests_with_seed1) # Make sure running the tests with random seed 2 gets the same # order as in iteration 2 above. Success means that Google Test # correctly restores the test order before re-shuffling at the # beginning of iteration 2. [tests_with_seed2] = GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(2)]) self.assertEqual(tests_in_iteration2, tests_with_seed2) # Make sure running the tests with random seed 3 gets the same # order as in iteration 3 above. Success means that Google Test # correctly restores the test order before re-shuffling at the # beginning of iteration 3. [tests_with_seed3] = GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(3)]) self.assertEqual(tests_in_iteration3, tests_with_seed3) def testShuffleGeneratesNewOrderInEachIteration(self): [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = ( GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)])) self.assert_(tests_in_iteration1 != tests_in_iteration2, tests_in_iteration1) self.assert_(tests_in_iteration1 != tests_in_iteration3, tests_in_iteration1) self.assert_(tests_in_iteration2 != tests_in_iteration3, tests_in_iteration2) def testShuffleShardedTestsPreservesPartition(self): # If we run M tests on N shards, the same M tests should be run in # total, regardless of the random seeds used by the shards. 
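    # For example, with M = 6 tests t1..t6 on N = 3 shards, one seeding may
    # partition them as [t1, t4], [t5, t2], [t3, t6] and another as [t4, t1],
    # [t2, t5], [t6, t3]; either way the sorted concatenation is [t1, ..., t6].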
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '0'}, [ShuffleFlag(), RandomSeedFlag(1)]) [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, [ShuffleFlag(), RandomSeedFlag(20)]) [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '2'}, [ShuffleFlag(), RandomSeedFlag(25)]) sorted_sharded_tests = tests1 + tests2 + tests3 sorted_sharded_tests.sort() sorted_active_tests = [] sorted_active_tests.extend(ACTIVE_TESTS) sorted_active_tests.sort() self.assertEqual(sorted_active_tests, sorted_sharded_tests) if __name__ == '__main__': gtest_test_utils.Main() # This file is a part of MediaDrop (http://www.mediadrop.net), # Copyright 2009-2015 MediaDrop contributors # For the exact contribution history, see the git revision log. # The source code contained in this file is licensed under the GPLv3 or # (at your option) any later version. # See LICENSE.txt in the main project directory, for more information. from mediadrop.lib.test.pythonic_testcase import * from mediadrop.plugin.events import Event, observes class ObserveDecoratorTest(PythonicTestCase): def test_catches_unknown_keyword_parameters_in_constructor(self): e = assert_raises(TypeError, lambda: observes(Event(), invalid=True)) assert_equals("TypeError: observes() got an unexpected keyword argument 'invalid'", e.args[0]) def probe(self, result): pass def test_can_observe_event(self): event = Event([]) observes(event)(self.probe) assert_length(1, event.observers) assert_equals(self.probe, event.observers[0]) def test_observers_can_request_priority(self): def second_probe(result): pass event = Event([]) observes(event)(self.probe) observes(event, appendleft=True)(second_probe) assert_length(2, event.observers) assert_equals([second_probe, self.probe], list(event.observers)) import unittest def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(ObserveDecoratorTest)) return suite if __name__ == '__main__': unittest.main(defaultTest='suite') # -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2016, Bruno Cauet # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Create freedesktop.org-compliant thumbnails for album folders This plugin is POSIX-only. 
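Thumbnails are written below $XDG_CACHE_HOME/thumbnails: 256x256 PNGs into
large/ and 128x128 PNGs into normal/, each named after the MD5 hash of the
album directory's URI (see thumbnail_file_name() below).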
Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html
"""

from __future__ import division, absolute_import, print_function

from hashlib import md5
import os
import shutil
from itertools import chain
from pathlib import PurePosixPath
import ctypes
import ctypes.util

from xdg import BaseDirectory

from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs
from beets import util
from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version
import six

BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
NORMAL_DIR = util.bytestring_path(os.path.join(BASE_DIR, "normal"))
LARGE_DIR = util.bytestring_path(os.path.join(BASE_DIR, "large"))


class ThumbnailsPlugin(BeetsPlugin):
    def __init__(self):
        super(ThumbnailsPlugin, self).__init__()
        self.config.add({
            'auto': True,
            'force': False,
            'dolphin': False,
        })

        self.write_metadata = None
        if self.config['auto'] and self._check_local_ok():
            self.register_listener('art_set', self.process_album)

    def commands(self):
        thumbnails_command = Subcommand("thumbnails",
                                        help=u"Create album thumbnails")
        thumbnails_command.parser.add_option(
            u'-f', u'--force',
            dest='force', action='store_true', default=False,
            help=u'force regeneration of thumbnails deemed fine (existing & '
                 u'recent enough)')
        thumbnails_command.parser.add_option(
            u'--dolphin', dest='dolphin', action='store_true', default=False,
            help=u"create Dolphin-compatible thumbnail information (for KDE)")
        thumbnails_command.func = self.process_query

        return [thumbnails_command]

    def process_query(self, lib, opts, args):
        self.config.set_args(opts)
        if self._check_local_ok():
            for album in lib.albums(decargs(args)):
                self.process_album(album)

    def _check_local_ok(self):
        """Check that everything is ready:
            - local capability to resize images
            - thumbnail dirs exist (create them if needed)
            - detect whether we'll use PIL or IM
            - detect whether we'll use GIO or Python to get URIs
        """
        if not ArtResizer.shared.local:
            self._log.warning(u"No local image resizing capabilities, "
                              u"cannot generate thumbnails")
            return False

        for dir in (NORMAL_DIR, LARGE_DIR):
            if not os.path.exists(dir):
                os.makedirs(dir)

        if get_im_version():
            self.write_metadata = write_metadata_im
            tool = "IM"
        else:
            assert get_pil_version()  # since we're local
            self.write_metadata = write_metadata_pil
            tool = "PIL"
        self._log.debug(u"using {0} to write metadata", tool)

        uri_getter = GioURI()
        if not uri_getter.available:
            uri_getter = PathlibURI()
        self._log.debug(u"using {0.name} to compute URIs", uri_getter)
        self.get_uri = uri_getter.uri

        return True

    def process_album(self, album):
        """Produce thumbnails for the album folder.
        """
        self._log.debug(u'generating thumbnail for {0}', album)
        if not album.artpath:
            self._log.info(u'album {0} has no art', album)
            return

        if self.config['dolphin']:
            self.make_dolphin_cover_thumbnail(album)

        size = ArtResizer.shared.get_size(album.artpath)
        if not size:
            self._log.warning(u'problem getting the picture size for {0}',
                              album.artpath)
            return

        wrote = True
        if max(size) >= 256:
            wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR)
        wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR)

        if wrote:
            self._log.info(u'wrote thumbnail for {0}', album)
        else:
            self._log.info(u'nothing to do for {0}', album)

    def make_cover_thumbnail(self, album, size, target_dir):
        """Make a thumbnail of given size for `album` and put it in
        `target_dir`.
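        Return True if a thumbnail was written, and False if an up-to-date
        thumbnail already exists and the `force` option is not set.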
""" target = os.path.join(target_dir, self.thumbnail_file_name(album.path)) if os.path.exists(target) and \ os.stat(target).st_mtime > os.stat(album.artpath).st_mtime: if self.config['force']: self._log.debug(u"found a suitable {1}x{1} thumbnail for {0}, " u"forcing regeneration", album, size) else: self._log.debug(u"{1}x{1} thumbnail for {0} exists and is " u"recent enough", album, size) return False resized = ArtResizer.shared.resize(size, album.artpath, util.syspath(target)) self.add_tags(album, util.syspath(resized)) shutil.move(resized, target) return True def thumbnail_file_name(self, path): """Compute the thumbnail file name See https://standards.freedesktop.org/thumbnail-spec/latest/x227.html """ uri = self.get_uri(path) hash = md5(uri.encode('utf-8')).hexdigest() return util.bytestring_path("{0}.png".format(hash)) def add_tags(self, album, image_path): """Write required metadata to the thumbnail See https://standards.freedesktop.org/thumbnail-spec/latest/x142.html """ mtime = os.stat(album.artpath).st_mtime metadata = {"Thumb::URI": self.get_uri(album.artpath), "Thumb::MTime": six.text_type(mtime)} try: self.write_metadata(image_path, metadata) except Exception: self._log.exception(u"could not write metadata to {0}", util.displayable_path(image_path)) def make_dolphin_cover_thumbnail(self, album): outfilename = os.path.join(album.path, b".directory") if os.path.exists(outfilename): return artfile = os.path.split(album.artpath)[1] with open(outfilename, 'w') as f: f.write('[Desktop Entry]\n') f.write('Icon=./{0}'.format(artfile.decode('utf-8'))) f.close() self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename)) def write_metadata_im(file, metadata): """Enrich the file metadata with `metadata` dict thanks to IM.""" command = ['convert', file] + \ list(chain.from_iterable(('-set', k, v) for k, v in metadata.items())) + [file] util.command_output(command) return True def write_metadata_pil(file, metadata): """Enrich the file metadata with `metadata` dict thanks to PIL.""" from PIL import Image, PngImagePlugin im = Image.open(file) meta = PngImagePlugin.PngInfo() for k, v in metadata.items(): meta.add_text(k, v, 0) im.save(file, "PNG", pnginfo=meta) return True class URIGetter(object): available = False name = "Abstract base" def uri(self, path): raise NotImplementedError() class PathlibURI(URIGetter): available = True name = "Python Pathlib" def uri(self, path): return PurePosixPath(path).as_uri() def copy_c_string(c_string): """Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python string and return it. The old memory is then safe to free. """ # This is a pretty dumb way to get a string copy, but it seems to # work. A more surefire way would be to allocate a ctypes buffer and copy # the data with `memcpy` or somesuch. s = ctypes.cast(c_string, ctypes.c_char_p).value return b'' + s class GioURI(URIGetter): """Use gio URI function g_file_get_uri. Paths must be utf-8 encoded. 
""" name = "GIO" def __init__(self): self.libgio = self.get_library() self.available = bool(self.libgio) if self.available: self.libgio.g_type_init() # for glib < 2.36 self.libgio.g_file_get_uri.argtypes = [ctypes.c_char_p] self.libgio.g_file_new_for_path.restype = ctypes.c_void_p self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p] self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char) self.libgio.g_object_unref.argtypes = [ctypes.c_void_p] def get_library(self): lib_name = ctypes.util.find_library("gio-2") try: if not lib_name: return False return ctypes.cdll.LoadLibrary(lib_name) except OSError: return False def uri(self, path): g_file_ptr = self.libgio.g_file_new_for_path(path) if not g_file_ptr: raise RuntimeError(u"No gfile pointer received for {0}".format( util.displayable_path(path))) try: uri_ptr = self.libgio.g_file_get_uri(g_file_ptr) finally: self.libgio.g_object_unref(g_file_ptr) if not uri_ptr: self.libgio.g_free(uri_ptr) raise RuntimeError(u"No URI received from the gfile pointer for " u"{0}".format(util.displayable_path(path))) try: uri = copy_c_string(uri_ptr) finally: self.libgio.g_free(uri_ptr) try: return uri.decode(util._fsencoding()) except UnicodeDecodeError: raise RuntimeError( "Could not decode filename from GIO: {!r}".format(uri) ) #!/usr/bin/env kross import urllib, Kross, KSpread T = Kross.module("kdetranslation") class MyConfig: def __init__(self): self.url = "http://127.0.0.1:20433" self.sheetRange = "A1:F50" self.cellNameOnSelectionChanged = True self.cellValueOnSelectionChanged = True #TODO self.sheetNameOnSheetChanged = True class MyOrca: def __init__(self, config): self.config = config def _send(self, data): f = urllib.urlopen(self.config.url, data) s = f.read() f.close() return s def speak(self, text): self._send("speak:%s" % text) def stop(self):