def _modeSet(self, match): """ Adds mode flags to a user in the CHANNELS dict """ logging.debug(match.group(0)) channel, mode, nick = match.group(1), match.group(2), match.group(3) if 'o' in mode or 'O' in mode: if nick not in self.channels[channel].ops: self.channels[channel].ops.append(nick) if 'v' in mode or 'V' in mode: if nick not in self.channels[channel].voices: self.channels[channel].voices.append(nick) logging.debug('OPS: %s' % str(self.channels[channel].ops)) logging.debug('VOICES: %s' % str(self.channels[channel].voices)) def _modeUnset(self, match): """ Removes mode flags from a user in the CHANNELS dict """ logging.debug(match.group(0)) channel, mode, nick = match.group(1), match.group(2), match.group(3) if 'o' in mode or 'O' in mode: try: self.channels[channel].ops.remove(nick) except Exception as e: logging.exception(e) if 'v' in mode or 'V' in mode: try: self.channels[channel].voices.remove(nick) except Exception as e: logging.exception(e) logging.debug('OPS: %s' % str(self.channels[channel].ops)) logging.debug('VOICES: %s' % str(self.channels[channel].voices)) #------------------------------------------------------------------------------- def sendMessage(self, target, message): """ Send a message """ self._send("PRIVMSG %s :%s" % (target, message)) def sendNotice(self, target, message): """ Send a notice """ self._send("NOTICE %s :%s" % (target, message)) def checkACC(self, nick): """ Check the acc level of a nick """ self._send("NICKSERV ACC %s" % nick) def joinChannel(self, channel): """ Join a channel """ self._send("JOIN %s" % channel) def partChannel(self, channel): """ Leave a channel """ self._send("PART %s" % channel) def setNick(self, nick): """ Change nickname """ self.nick = nick self.compileRe() self._send("NICK %s" % nick) logging.info('Nick changed!') def setChannelTopic(self, channel, topic): """ Change channel topic """ self._send("TOPIC %s :%s" % (channel, topic)) def kickUser(self, channel, nick, message): """ Kick a user """ self._send("KICK %s %s :%s" % (channel, nick, message)) def quit(self, message="I'll be back!"): """ Send quit message """ self._send("QUIT :%s" % message) #------------------------------------------------------------------------------- def loadCommands(self, commands): """ Loads a dict as self.commands and compiles regex (overwrites all) """ logging.info('Loading commands') self.commands = commands self._pool.map(self._compileCommandRe, self.commands) def addCommand(self, name, regex, func): """ Add a command to the self.commands dict (overwrites commands with the same ) """ self.commands[name] = CommandHandle(regex, func) self._compileCommandRe(name) logging.info('Command: %s added!' % name) def removeCommand(self, name): """ Remove command from the self.commands dict """ del self.commands[name] logging.info('Command: %s removed!' 
% name) #------------------------------------------------------------------------------- def _compileServerRe(self, command): """ Compiles single server regex by command name """ self._serverRe[command].cregex = [] logging.debug(self._serverRe[command].regex) for item in self._serverRe[command].regex: self._serverRe[command].cregex.append(re.compile(item)) def _compileCommandRe(self, command): """ Compiles single command regex by command name """ self.commands[command].cregex = [] logging.debug(self.commands[command].regex) for item in self.commands[command].regex: self.commands[command].cregex.append(re.compile(item)) def compileRe(self): """ Uses the thread pool to compile all the commands regex """ logging.info('Compiling regex!') self._pool.map(self._compileServerRe, self._serverRe) self._pool.map(self._compileCommandRe, self.commands) def _autoJoin(self): """ Join all the channels in self.autojoin """ for chan in self.joinChans: logging.info('Auto joining: %s' % chan) self.joinChannel(chan) #------------------------------------------------------------------------------- def _sniffLine(self, line): """ Searches the line for anything relevent executes the function for the match """ match = False for name in self._serverRe: for item in self._serverRe[name].cregex: match = item.search(line) if match: self._serverRe[name].function(match) return True def _sniffMessage(self, match): """ Search PRIVMESG/NOTICE for a command executes the function for the match """ nick, host, chan, message = \ match.group(1), match.group(2), match.group(3), match.group(4) cmatch = False logging.info('[%s] %s: %s' % (chan, nick, message)) for name in self.commands: for regex in self.commands[name].cregex: cmatch = regex.search(message) if cmatch: self.commands[name].function(chan, nick, host, cmatch) return True #------------------------------------------------------------------------------- def _identifyNick(self, pswrd): """ Identify bot nickname with nickserv """ self._send("NICKSERV IDENTIFY %s" % (pswrd)) def auth(self, nick): """ Login to the IRC server and identify with nickserv""" logging.info('Authenticating bot with server...') self._send( "USER %s %s %s :This bot is a result of open-source development." 
%\ (nick, nick, nick) ) self._send("NICK %s" % nick) if self.pswrd: logging.debug('We have a nick password!') self._identifyNick(self.pswrd) logging.info('Waiting on Nickserv...') count = 0 while not self._authed: time.sleep(5) count += 1 if count > 5: raise RuntimeError('Failed to auth with Nickserv') else: self._authed = True def _send(self, message): """ Sends a message to IRC server """ logging.debug("> %s" % message) message = "%s\r\n" % message try: self._sock.send(message.encode("utf-8")) except (socket.timeout, socket.error, ssl.SSLError) as e: logging.warning("Socket Error: Could not send!") logging.exception(e) self._connected = False except Exception as e: logging.exception(e) self._connected, self._running = False, False def _listen(self): """ This should be running in a thread """ logging.info('Listening...') while self._connected: try: data = self._sock.recv(4096) except (socket.timeout, ssl.SSLError) as e: if 'timed out' in e.args[0]: continue else: logging.exception(e) self._connected = False continue except socket.error as e: logging.exception(e) self._connected = False continue else: if len(data) == 0: logging.warn('Listen socket closed!') self._connected = False continue try: data = data.strip(b'\r\n').decode("utf-8") self._listenPool.map(self._sniffLine, data.splitlines()) except Exception as e: logging.exception(e) continue self._listenPool.join() logging.info('No longer listening...') def connect(self): """Connect the socket to the server and listen""" while not self._connected: logging.info("Connecting to %s:%s" % (self.host, str(self.port))) self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._sock.settimeout(2) if not self._sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE): logging.debug('Keeping socket alive') self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) if self.ssl: self._sock = ssl.wrap_socket(self._sock) try: self._sock.connect((self.host, self.port)) except (socket.timeout, socket.error, ssl.SSLError) as e: logging.exception(e) time.sleep(1.0) continue except Exception as e: logging.exception(e) self._connected, self._running = False, False else: logging.info("Connected!") self._connected = True def disconnect(self): """ Disconnect from the server """ logging.info('Disconnecting...') self._connected, self._running, self._authed = False, False, False self.servHost, self.channels = None, {} try: self._pool.close() self._listenPool.close() except Exception as e: logging.exception(e) logging.debug('Pool closed') try: self._pool.join() except Exception as e: logging.exception(e) logging.debug('Pool joined') self._pool = Pool(self.threads) logging.debug('Pool cleared') try: self._listenThread.join() except Exception as e: logging.exception(e) logging.debug('Listen Thread joined(?)') try: self._sock.close() except Exception as e: logging.exception(e) logging.debug('Socket closed(?)') logging.info('Disconnected!') def __call__(self): """ Starts the connection to the server """ self._running = True while self._running: self.compileRe() self._listenThread = Process(name='Listener', target=self._listen) self._listenThread.daemon = True try: self.connect() self._listenThread.start() self.auth(self.nick) except: self.disconnect() continue self._autoJoin() while self._connected: try: time.sleep(0.5) except: self.disconnect() if __name__ == "__main__": logging.basicConfig( format='[%(asctime)s] %(message)s', datefmt="%m-%d %H:%M:%S", level=logging.DEBUG ) logger = logging.getLogger() logger.addHandler( 
RotatingFileHandler('ircbot.log', maxBytes=10**9, backupCount=5) ) bot = IRCServer('s10tb0t', ssl=True) def shutit(channel, nick, host, match): bot.sendMessage(channel, '%s asked me to quit! See ya!' % nick) bot._running = False bot.quit() bot.addCommand('Quit', r'^!quit', shutit) bot() #!/usr/bin/python ## lists contents of an IPS patch ## stole the processing from meunierd's python-ips ## License: MIT - https://opensource.org/licenses/MIT from os.path import getsize,isfile import struct from sys import argv def print_usage_and_exit(): print "Usage: {script} [IPS patch file]".format(script=argv[0]) exit(1) def unpack_int(string): """Read an n-byte big-endian integer from a byte string.""" (ret,) = struct.unpack_from('>I', b'\x00' * (4 - len(string)) + string) return ret try: patchpath = argv[1] and if (isfile(patchpath) == True): patch_size = getsize(patchpath) except: print_usage_and_exit() with open(patchpath, 'rb') as patchfile: if patchfile.read(5) != b'PATCH': raise Exception('Invalid patch header.') # Read First Record r = patchfile.read(3) while patchfile.tell() not in [patch_size, patch_size - 3]: # Unpack 3-byte pointers. offset = unpack_int(r) # Read size of data chunk r = patchfile.read(2) size = unpack_int(r) if size == 0: # RLE Record r = patchfile.read(2) rle_size = unpack_int(r) data = patchfile.read(1) * rle_size else: data = patchfile.read(size) if offset >= 0: # Write to file print "Offset: {offset}".format(offset=hex(offset)) print "Data : {data}".format(data=repr(data)) # Read Next Record r = patchfile.read(3) import json import logging from django.http import HttpResponse from django.utils.translation import ugettext as _ from celery.states import FAILURE, REVOKED, READY_STATES from instructor_task.api_helper import (get_status_from_instructor_task, get_updated_instructor_task) from instructor_task.models import PROGRESS log = logging.getLogger(__name__) # return status for completed tasks and tasks in progress STATES_WITH_STATUS = [state for state in READY_STATES] + [PROGRESS] def _get_instructor_task_status(task_id): """ Returns status for a specific task. Written as an internal method here (rather than as a helper) so that get_task_completion_info() can be called without causing a circular dependency (since it's also called directly). """ instructor_task = get_updated_instructor_task(task_id) status = get_status_from_instructor_task(instructor_task) if instructor_task is not None and instructor_task.task_state in STATES_WITH_STATUS: succeeded, message = get_task_completion_info(instructor_task) status['message'] = message status['succeeded'] = succeeded return status def instructor_task_status(request): """ View method that returns the status of a course-related task or tasks. Status is returned as a JSON-serialized dict, wrapped as the content of a HTTPResponse. The task_id can be specified to this view in one of two ways: * by making a request containing 'task_id' as a parameter with a single value Returns a dict containing status information for the specified task_id * by making a request containing 'task_ids' as a parameter, with a list of task_id values. Returns a dict of dicts, with the task_id as key, and the corresponding dict containing status information for the specified task_id Task_id values that are unrecognized are skipped. The dict with status information for a task contains the following keys: 'message': on complete tasks, status message reporting on final progress, or providing exception message if failed. 
For tasks in progress, indicates the current progress. 'succeeded': on complete tasks or tasks in progress, boolean value indicates if the task outcome was successful: did it achieve what it set out to do. This is in contrast with a successful task_state, which indicates that the task merely completed. 'task_id': id assigned by LMS and used by celery. 'task_state': state of task as stored in celery's result store. 'in_progress': boolean indicating if task is still running. 'task_progress': dict containing progress information. This includes: 'attempted': number of attempts made 'succeeded': number of attempts that "succeeded" 'total': number of possible subtasks to attempt 'action_name': user-visible verb to use in status messages. Should be past-tense. 'duration_ms': how long the task has (or had) been running. 'exception': name of exception class raised in failed tasks. 'message': returned for failed and revoked tasks. 'traceback': optional, returned if task failed and produced a traceback. """ output = {} if 'task_id' in request.REQUEST: task_id = request.REQUEST['task_id'] output = _get_instructor_task_status(task_id) elif 'task_ids[]' in request.REQUEST: tasks = request.REQUEST.getlist('task_ids[]') for task_id in tasks: task_output = _get_instructor_task_status(task_id) if task_output is not None: output[task_id] = task_output return HttpResponse(json.dumps(output, indent=4)) def get_task_completion_info(instructor_task): """ Construct progress message from progress information in InstructorTask entry. Returns (boolean, message string) duple, where the boolean indicates whether the task completed without incident. (It is possible for a task to attempt many sub-tasks, such as rescoring many students' problem responses, and while the task runs to completion, some of the students' responses could not be rescored.) Used for providing messages to instructor_task_status(), as well as external calls for providing course task submission history information. """ succeeded = False if instructor_task.task_state not in STATES_WITH_STATUS: return (succeeded, _("No status information available")) # we're more surprised if there is no output for a completed task, but just warn: if instructor_task.task_output is None: log.warning(_("No task_output information found for instructor_task {0}").format(instructor_task.task_id)) return (succeeded, _("No status information available")) try: task_output = json.loads(instructor_task.task_output) except ValueError: fmt = _("No parsable task_output information found for instructor_task {0}: {1}") log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output)) return (succeeded, _("No parsable status information available")) if instructor_task.task_state in [FAILURE, REVOKED]: return (succeeded, task_output.get('message', _('No message provided'))) if any([key not in task_output for key in ['action_name', 'attempted', 'total']]): fmt = _("Invalid task_output information found for instructor_task {0}: {1}") log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output)) return (succeeded, _("No progress status information available")) action_name = _(task_output['action_name']) num_attempted = task_output['attempted'] num_total = task_output['total'] # In earlier versions of this code, the key 'updated' was used instead of # (the more general) 'succeeded'. In order to support history that may contain # output with the old key, we check for values with both the old and the current # key, and simply sum them. 
num_succeeded = task_output.get('updated', 0) + task_output.get('succeeded', 0) num_skipped = task_output.get('skipped', 0) student = None problem_url = None email_id = None try: task_input = json.loads(instructor_task.task_input) except ValueError: fmt = _("No parsable task_input information found for instructor_task {0}: {1}") log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input)) else: student = task_input.get('student') problem_url = task_input.get('problem_url') email_id = task_input.get('email_id') if instructor_task.task_state == PROGRESS: # special message for providing progress updates: # Translators: {action} is a past-tense verb that is localized separately. {attempted} and {succeeded} are counts. msg_format = _("Progress: {action} {succeeded} of {attempted} so far") elif student is not None and problem_url is not None: # this reports on actions on problems for a particular student: if num_attempted == 0: # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. msg_format = _("Unable to find submission to be {action} for student '{student}'") elif num_succeeded == 0: # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. msg_format = _("Problem failed to be {action} for student '{student}'") else: succeeded = True # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. msg_format = _("Problem successfully {action} for student '{student}'") elif student is None and problem_url is not None: # this reports on actions on problems for all students: if num_attempted == 0: # Translators: {action} is a past-tense verb that is localized separately. msg_format = _("Unable to find any students with submissions to be {action}") elif num_succeeded == 0: # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Problem failed to be {action} for any of {attempted} students") elif num_succeeded == num_attempted: succeeded = True # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Problem successfully {action} for {attempted} students") else: # num_succeeded < num_attempted # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. msg_format = _("Problem {action} for {succeeded} of {attempted} students") elif email_id is not None: # this reports on actions on bulk emails if num_attempted == 0: # Translators: {action} is a past-tense verb that is localized separately. msg_format = _("Unable to find any recipients to be {action}") elif num_succeeded == 0: # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Message failed to be {action} for any of {attempted} recipients ") elif num_succeeded == num_attempted: succeeded = True # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Message successfully {action} for {attempted} recipients") else: # num_succeeded < num_attempted # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. msg_format = _("Message {action} for {succeeded} of {attempted} recipients") else: # provide a default: # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. 
msg_format = _("Status: {action} {succeeded} of {attempted}") if num_skipped > 0: # Translators: {skipped} is a count. This message is appended to task progress status messages. msg_format += _(" (skipping {skipped})") if student is None and num_attempted != num_total: # Translators: {total} is a count. This message is appended to task progress status messages. msg_format += _(" (out of {total})") # Update status in task result object itself: message = msg_format.format( action=action_name, succeeded=num_succeeded, attempted=num_attempted, total=num_total, skipped=num_skipped, student=student ) return (succeeded, message) ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # Proofpoint, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetgroupprober import CharSetGroupProber from .utf8prober import UTF8Prober from .sjisprober import SJISProber from .eucjpprober import EUCJPProber from .gb2312prober import GB2312Prober from .euckrprober import EUCKRProber from .cp949prober import CP949Prober from .big5prober import Big5Prober from .euctwprober import EUCTWProber class MBCSGroupProber(CharSetGroupProber): def __init__(self, lang_filter=None): super(MBCSGroupProber, self).__init__(lang_filter=lang_filter) self.probers = [ UTF8Prober(), SJISProber(), EUCJPProber(), GB2312Prober(), EUCKRProber(), CP949Prober(), Big5Prober(), EUCTWProber() ] self.reset() from selenium import webdriver from fixture.session import SessionHelper from fixture.fixture_group import GroupHelper from fixture.navigation import NavigationHelper from fixture.fixture_contact import * class Application: def __init__(self, browser, baseurl): if browser == 'firefox': self.wd = webdriver.Firefox() elif browser == 'chrome': self.wd = webdriver.Chrome() elif browser == 'ie': self.wd = webdriver.Ie() self.wd.implicitly_wait(10) elif browser == 'edge': self.wd = webdriver.Edge() else: raise ValueError('Unrecognized browser %s' % browser) self.session = SessionHelper(self) self.contact = ContactHelper(self) self.group = GroupHelper(self) self.navigation = NavigationHelper(self) self.baseurl = baseurl # метод, проверяющий, валидна ли фикстура def is_valid(self): try: self.wd.current_url return True except: return False def destroy(self): self.wd.quit() # -*- coding: utf-8 -*- """ Tipue Search ============ A Pelican plugin to serialize generated HTML to JSON that can be used by jQuery plugin - Tipue Search. 
Copyright (c) Talha Mansoor """ from __future__ import unicode_literals import os.path import json from bs4 import BeautifulSoup from codecs import open try: from urlparse import urljoin except ImportError: from urllib.parse import urljoin from pelican import signals class Tipue_Search_JSON_Generator(object): def __init__(self, context, settings, path, theme, output_path, *null): self.output_path = output_path self.context = context self.siteurl = settings.get('SITEURL') self.tpages = settings.get('TEMPLATE_PAGES') self.output_path = output_path self.json_nodes = [] def create_json_node(self, page): if getattr(page, 'status', 'published') != 'published': return soup_title = BeautifulSoup(page.title.replace(' ', ' '), 'html.parser') page_title = soup_title.get_text(' ', strip=True).replace('“', '"').replace('”', '"').replace('’', "'").replace('^', '^') soup_text = BeautifulSoup(page.content, 'html.parser') page_text = soup_text.get_text(' ', strip=True).replace('“', '"').replace('”', '"').replace('’', "'").replace('¶', ' ').replace('^', '^') page_text = ' '.join(page_text.split()) if getattr(page, 'category', 'None') == 'None': page_category = '' else: page_category = page.category.name page_url = self.siteurl + '/' + page.url node = {'title': page_title, 'text': page_text, 'tags': page_category, 'url': page_url} self.json_nodes.append(node) def create_tpage_node(self, srclink): srcfile = open(os.path.join(self.output_path, self.tpages[srclink]), encoding='utf-8') soup = BeautifulSoup(srcfile, 'html.parser') page_text = soup.get_text() # What happens if there is not a title. if soup.title is not None: page_title = soup.title.string else: page_title = '' # Should set default category? page_category = '' page_url = urljoin(self.siteurl, self.tpages[srclink]) node = {'title': page_title, 'text': page_text, 'tags': page_category, 'url': page_url} self.json_nodes.append(node) def generate_output(self, writer): path = os.path.join(self.output_path, 'tipuesearch_content.json') pages = self.context['pages'] + self.context['articles'] for article in self.context['articles']: pages += article.translations for srclink in self.tpages: self.create_tpage_node(srclink) for page in pages: self.create_json_node(page) root_node = {'pages': self.json_nodes} #因為目前改為 local only, 所以蓋掉 .json 建立 #with open(path, 'w', encoding='utf-8') as fd: # 寫出所需要的 .json 檔案 #json.dump(root_node, fd, separators=(',', ':'), ensure_ascii=False) # 以下寫出 .js 檔案, 主要用於近端的 Tipue search js_path = os.path.join(self.output_path, 'tipuesearch_content.js') with open(js_path, 'w', encoding='utf-8') as fd: # 寫出所需要的 .js 檔案 search_text = json.dumps(root_node, separators=(',', ':'), ensure_ascii=False) fd.write('var tipuesearch = ') fd.write(search_text) fd.write(';') def get_generators(generators): return Tipue_Search_JSON_Generator def register(): signals.get_generators.connect(get_generators) # # Helper functions for wxWidgets bakefiles # # import utils # We use 'CFG' option in places where bakefile doesn't like it, so we must # register a substitution function for it that provides additional knowledge # about the option (in this case that it does not contain dir separators and # so utils.nativePaths() doesn't have to do anything with it): try: # this fails in 0.1.4 and 0.1.5 has different subst.callbacks signature: utils.checkBakefileVersion('0.1.5') def __noopSubst(name, func, caller): return '$(%s)' % name except AttributeError: def __noopSubst(func, name): return '$(%s)' % name utils.addSubstituteCallback('CFG', __noopSubst) 
utils.addSubstituteCallback('LIBDIRNAME', __noopSubst) utils.addSubstituteCallback('SETUPHDIR', __noopSubst) utils.addSubstituteCallback('OBJS', __noopSubst) def mk_wxid(id): """Creates wxWidgets library identifier from bakefile target ID that follows this convention: DLLs end with 'dll', static libraries end with 'lib'. If withPrefix=1, then _wxid is returned instead of wxid.""" if id.endswith('dll') or id.endswith('lib'): wxid = id[:-3] else: wxid = id return wxid # All libs that are part of the main library: MAIN_LIBS = ['mono', 'base', 'core', 'adv', 'html', 'xml', 'net', 'webview', 'media', 'qa', 'xrc', 'aui', 'ribbon', 'propgrid', 'richtext', 'stc'] # List of library names/ids for categories with different names: LIBS_NOGUI = ['xml', 'net'] LIBS_GUI = ['core', 'adv', 'html', 'gl', 'qa', 'xrc', 'media', 'aui', 'propgrid', 'richtext', 'stc', 'ribbon', 'webview'] # Additional libraries that must be linked in: EXTRALIBS = { 'gl' : '$(EXTRALIBS_OPENGL)', 'xml' : '$(EXTRALIBS_XML)', 'html' : '$(EXTRALIBS_HTML)', 'adv' : '$(PLUGIN_ADV_EXTRALIBS)', 'media' : '$(EXTRALIBS_MEDIA)', } def mkLibName(wxid): """Returns string that can be used as library name, including name suffixes, prefixes, version tags etc. This must be kept in sync with variables defined in common.bkl!""" if wxid == 'mono': return '$(WXNAMEPREFIXGUI)$(WXNAMESUFFIX)$(WXVERSIONTAG)$(HOST_SUFFIX)' if wxid == 'base': return '$(WXNAMEPREFIX)$(WXNAMESUFFIX)$(WXVERSIONTAG)$(HOST_SUFFIX)' if wxid in LIBS_NOGUI: return '$(WXNAMEPREFIX)$(WXNAMESUFFIX)_%s$(WXVERSIONTAG)$(HOST_SUFFIX)' % wxid return '$(WXNAMEPREFIXGUI)$(WXNAMESUFFIX)_%s$(WXVERSIONTAG)$(HOST_SUFFIX)' % wxid def mkDllName(wxid): """Returns string that can be used as DLL name, including name suffixes, prefixes, version tags etc. This must be kept in sync with variables defined in common.bkl!""" if wxid == 'mono': return '$(WXDLLNAMEPREFIXGUI)$(WXNAMESUFFIX)$(WXCOMPILER)$(VENDORTAG)$(WXDLLVERSIONTAG)' if wxid == 'base': return '$(WXDLLNAMEPREFIX)$(WXNAMESUFFIX)$(WXCOMPILER)$(VENDORTAG)$(WXDLLVERSIONTAG)' if wxid in LIBS_NOGUI: return '$(WXDLLNAMEPREFIX)$(WXNAMESUFFIX)_%s$(WXCOMPILER)$(VENDORTAG)$(WXDLLVERSIONTAG)' % wxid return '$(WXDLLNAMEPREFIXGUI)$(WXNAMESUFFIX)_%s$(WXCOMPILER)$(VENDORTAG)$(WXDLLVERSIONTAG)' % wxid def libToLink(wxlibname): """Returns string to pass to when linking against 'wxlibname'. For one of main libraries, libToLink('foo') returns '$(WXLIB_FOO)' which must be defined in common.bkl as either nothing (in monolithic build) or mkLibName('foo') (otherwise). 
""" if wxlibname in MAIN_LIBS: return '$(WXLIB_%s)' % wxlibname.upper() else: return mkLibName(wxlibname) def extraLdflags(wxlibname): if wxlibname in EXTRALIBS: return EXTRALIBS[wxlibname] else: return '' wxVersion = None VERSION_FILE = '../../include/wx/version.h' def getVersion(): """Returns wxWidgets version as a tuple: (major,minor,release).""" global wxVersion if wxVersion == None: f = open(VERSION_FILE, 'rt') lines = f.readlines() f.close() major = minor = release = None for l in lines: if not l.startswith('#define'): continue splitline = l.strip().split() if splitline[0] != '#define': continue if len(splitline) < 3: continue name = splitline[1] value = splitline[2] if value == None: continue if name == 'wxMAJOR_VERSION': major = int(value) if name == 'wxMINOR_VERSION': minor = int(value) if name == 'wxRELEASE_NUMBER': release = int(value) if major != None and minor != None and release != None: break wxVersion = (major, minor, release) return wxVersion def getVersionMajor(): return getVersion()[0] def getVersionMinor(): return getVersion()[1] def getVersionRelease(): return getVersion()[2] def headersOnly(files): """Filters 'files' so that only headers are left. Used with to add headers to VC++ projects but not files such as arrimpl.cpp.""" def callback(cond, sources): prf = suf = '' if sources[0].isspace(): prf=' ' if sources[-1].isspace(): suf=' ' retval = [] for s in sources.split(): if s.endswith('.h'): retval.append(s) return '%s%s%s' % (prf, ' '.join(retval), suf) return utils.substitute2(files, callback) def makeDspDependency(lib): """Returns suitable entry for for main libs.""" return '%s:$(nativePaths(WXTOPDIR))build\\msw\\wx_%s.dsp' % (lib,lib) #! /usr/freeware/bin/python # # This is dump2trj, a program written by Keir E. Novik to convert # Lammps position dump files to Amber trajectory files. # # Copyright 2000, 2001 Keir E. Novik; all rights reserved. # # Modified by Vikas Varshney, U Akron, 5 July 2005, as described in README # #============================================================ def Convert_files(): 'Handle the whole conversion process' print print 'Welcome to dump2trj, a program to convert Lammps position dump files to\nAmber trajectory format!' print Basename_list = Find_dump_files() for Basename in Basename_list: t = Trajectory() if t.Read_dump(Basename): t.Write_trj(Basename) del t print #============================================================ def Find_dump_files(): 'Look for sets of Lammps position dump files to process' '''If passed something on the command line, treat it as a list of files to process. Otherwise, look for *.dump in the current directory. 
''' import os, sys Basename_list = [] # Extract basenames from command line for Name in sys.argv[1:]: if Name[-5:] == '.dump': Basename_list.append(Name[:-5]) else: Basename_list.append(Name) if Basename_list == []: print 'Looking for Lammps dump files...', Dir_list = os.listdir('.') for Filename in Dir_list: if Filename[-5:] == '.dump': Basename_list.append(Filename[:-5]) Basename_list.sort() if Basename_list != []: print 'found', for i in range(len(Basename_list)-1): print Basename_list[i] + ',', print Basename_list[-1] + '\n' if Basename_list == []: print 'none.\n' return Basename_list #============================================================ class Snapshot: def __init__(self, The_trajectory): 'Initialise the Snapshot class' self.timestep = The_trajectory.timestep self.atoms = The_trajectory.atoms self.xlo = The_trajectory.xlo self.xhi = The_trajectory.xhi self.ylo = The_trajectory.ylo self.yhi = The_trajectory.yhi self.zlo = The_trajectory.zlo self.zhi = The_trajectory.zhi #-------------------------------------------------------- def Read_dump(self, Lines): 'Read a snapshot (timestep) from a Lammps position dump file' '''Trajectory.Read_dump() will pass us only the lines we need. ''' self.Atom_list = Lines #-------------------------------------------------------- def Write_trj(self, F): 'Write a snapshot (timestep) to an Amber trajectory file' '''The Atom_list must be sorted, as it may not be in order (for example, in a parallel Lammps simulation). ''' import string xBOX = (self.xhi - self.xlo) yBOX = (self.yhi - self.ylo) zBOX = (self.zhi - self.zlo) Min = min(self.xlo, self.ylo, self.zlo) Max = max(self.xhi, self.yhi, self.zhi, xBOX, yBOX, zBOX) if Min <= -1000 or Max >= 10000: print '(error: coordinates too large!)' return Print_list = [] for Line in NumericalSort(self.Atom_list): Item_list = string.split(Line) x = xBOX * (Float(Item_list[2])+Float(Item_list[5])) # Modified main box x-coordinate to actual x-coordinate y = yBOX * (Float(Item_list[3])+Float(Item_list[6])) # Modified main box y-coordinate to actual y-coordinate z = zBOX * (Float(Item_list[4])+Float(Item_list[7])) # Modified main box z-coordinate to actual z-coordinate Print_list.append('%(x)8.3f' % vars()) Print_list.append('%(y)8.3f' % vars()) Print_list.append('%(z)8.3f' % vars()) if len(Print_list) > 9: Line = '' for j in range(10): Line = Line + Print_list[j] Line = Line + '\n' Print_list = Print_list[10:] try: F.write(Line) except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return if len(Print_list) > 0: Line = '' for j in range(len(Print_list)): Line = Line + Print_list[j] Line = Line + '\n' try: F.write(Line) except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return Line = '%(xBOX)8.3f%(yBOX)8.3f%(zBOX)8.3f\n' % vars() try: F.write(Line) except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return #============================================================ class Trajectory: def Read_dump(self, Basename): 'Read a Lammps position dump file' import string, sys Filename = Basename + '.dump' print 'Reading', Filename + '...', sys.stdout.flush() try: F = open(Filename) except IOError, Detail: print '(error:', Detail[1] + '!)' return 0 try: Lines = F.readlines() except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return 0 F.close() i = 0 self.Snapshot_list = [] # Parse the dump while i < len(Lines): if string.find(Lines[i], 'ITEM: TIMESTEP') != -1: # Read the timestep self.timestep = int(Lines[i+1]) i = i + 2 elif string.find(Lines[i], 'ITEM: NUMBER 
OF ATOMS') != -1: # Read the number of atoms self.atoms = int(Lines[i+1]) i = i + 2 elif string.find(Lines[i], 'ITEM: BOX BOUNDS') != -1: # Read the periodic box boundaries Item_list = string.split(Lines[i+1]) self.xlo = Float(Item_list[0]) self.xhi = Float(Item_list[1]) Item_list = string.split(Lines[i+2]) self.ylo = Float(Item_list[0]) self.yhi = Float(Item_list[1]) Item_list = string.split(Lines[i+3]) self.zlo = Float(Item_list[0]) self.zhi = Float(Item_list[1]) i = i + 4 elif string.find(Lines[i], 'ITEM: ATOMS') != -1: # Read atom positions self.Snapshot_list.append(Snapshot(self)) Start = i + 1 End = Start + self.atoms self.Snapshot_list[-1].Read_dump(Lines[Start:End]) i = i + self.atoms + 1 else: print '(error: unknown line in file!)' return print 'done.' return 1 #-------------------------------------------------------- def Write_trj(self, Basename): 'Write an Amber trajectory file' import os, sys Filename = Basename + '.mdcrd' Dir_list = os.listdir('.') i = 1 while Filename in Dir_list: Filename = Basename + `i` + '.mdcrd' i = i + 1 del i print 'Writing', Filename + '...', sys.stdout.flush() try: F = open(Filename, 'w') except IOError, Detail: print '(error:', Detail[1] + '!)' return try: F.write(Basename + '\n') except IOError, Detail: print '(error:', Detail[1] + '!)' F.close() return for S in self.Snapshot_list: S.Write_trj(F) F.close() print 'done.' #============================================================ def Float(s): 'Return the string s as a float, if possible' try: x = float(s) except ValueError: if s[-1] == ',': s = s[:-1] x = float(s) return x #============================================================ def NumericalSort(String_list): 'Sort a list of strings by the integer value of the first element' import string Working_list = [] for s in String_list: Working_list.append((int(string.split(s)[0]), s)) Working_list.sort() Return_list = [] for Tuple in Working_list: Return_list.append(Tuple[1]) return Return_list #============================================================ Convert_files() #!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: nxos_igmp version_added: "2.2" short_description: Manages IGMP global configuration. description: - Manages IGMP global configuration configuration settings. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - When C(state=default), all supported params will be reset to a default state. - If restart is set to true with other params set, the restart will happen last, i.e. after the configuration takes place. options: flush_routes: description: - Removes routes when the IGMP process is restarted. By default, routes are not flushed. required: false default: null choices: ['true', 'false'] enforce_rtr_alert: description: - Enables or disables the enforce router alert option check for IGMPv2 and IGMPv3 packets. 
required: false default: null choices: ['true', 'false'] restart: description: - Restarts the igmp process (using an exec config command). required: false default: null choices: ['true', 'false'] state: description: - Manages desired state of the resource. required: false default: present choices: ['present', 'default'] ''' EXAMPLES = ''' - name: Default igmp global params (all params except restart) nxos_igmp: state: default host: "{{ inventory_hostname }}" - name: Ensure the following igmp global config exists on the device nxos_igmp: flush_routes: true enforce_rtr_alert: true host: "{{ inventory_hostname }}" - name: Restart the igmp process nxos_igmp: restart: true host: "{{ inventory_hostname }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: verbose mode type: dict sample: {"enforce_rtr_alert": true, "flush_routes": true} existing: description: k/v pairs of existing IGMP configuration returned: verbose mode type: dict sample: {"enforce_rtr_alert": true, "flush_routes": false} end_state: description: k/v pairs of IGMP configuration after module execution returned: verbose mode type: dict sample: {"enforce_rtr_alert": true, "flush_routes": true} updates: description: commands sent to the device returned: always type: list sample: ["ip igmp flush-routes"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' import re from ansible.module_utils.nxos import get_config, load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.netcfg import CustomNetworkConfig PARAM_TO_COMMAND_KEYMAP = { 'flush_routes': 'ip igmp flush-routes', 'enforce_rtr_alert': 'ip igmp enforce-router-alert' } def get_value(arg, config): REGEX = re.compile(r'{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) value = False try: if REGEX.search(config): value = True except TypeError: value = False return value def get_existing(module, args): existing = {} config = str(get_config(module)) for arg in args: existing[arg] = get_value(arg, config) return existing def invoke(name, *args, **kwargs): func = globals().get(name) if func: return func(*args, **kwargs) def get_commands(module, existing, proposed, candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) if module.params['state'] == 'default': for key, value in proposed_commands.items(): if existing_commands.get(key): commands.append('no {0}'.format(key)) else: for key, value in proposed_commands.items(): if value is True: commands.append(key) else: if existing_commands.get(key): commands.append('no {0}'.format(key)) if module.params['restart']: commands.append('restart igmp') if commands: parents = [] candidate.add(commands, parents=parents) def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = value else: new_dict[new_key] = value return new_dict def main(): argument_spec = dict( flush_routes=dict(type='bool'), enforce_rtr_alert=dict(type='bool'), restart=dict(type='bool', default=False), state=dict(choices=['present', 'default'], default='present'), include_defaults=dict(default=False), config=dict(), save=dict(type='bool', default=False) ) argument_spec.update(nxos_argument_spec) module = 
AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) state = module.params['state'] restart = module.params['restart'] if (state == 'default' and (module.params['flush_routes'] is not None or module.params['enforce_rtr_alert'] is not None)): module.fail_json(msg='When state=default other params have no effect.') args = [ "flush_routes", "enforce_rtr_alert", ] existing = invoke('get_existing', module, args) end_state = existing proposed = dict((k, v) for k, v in module.params.items() if v is not None and k in args) proposed_args = proposed.copy() if state == 'default': proposed_args = dict((k, False) for k in args) result = {} if (state == 'present' or (state == 'default' and True in existing.values()) or restart): candidate = CustomNetworkConfig(indent=3) invoke('get_commands', module, existing, proposed_args, candidate) try: response = load_config(module, candidate) result.update(response) except ShellError: exc = get_exception() module.fail_json(msg=str(exc)) else: result['updates'] = [] if restart: proposed['restart'] = restart result['connected'] = module.connected if module._verbosity > 0: end_state = invoke('get_existing', module, args) result['end_state'] = end_state result['existing'] = existing result['proposed'] = proposed result['warnings'] = warnings module.exit_json(**result) if __name__ == '__main__': main() #!/usr/bin/env python """ This tests the general database crud functions """ import os import sys import sqlalchemy sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) import legendary_waffle db_engine = sqlalchemy.create_engine("sqlite://") legendary_waffle.models.MODELBASE.metadata.create_all(db_engine) legendary_waffle.models.MODELBASE.metadata.bind = db_engine db_session = sqlalchemy.orm.sessionmaker(bind=db_engine) db = db_session() print legendary_waffle.attribute_read(db) legendary_waffle.model_create(db, legendary_waffle.models.Attribute, name = 'butts') print legendary_waffle.attribute_read(db) legendary_waffle.model_update(db, legendary_waffle.models.Attribute, 1, name = 'poop') print legendary_waffle.model_read(db, legendary_waffle.models.Attribute) print legendary_waffle.model_read(db, legendary_waffle.models.Attribute, 1) legendary_waffle.model_delete(db, legendary_waffle.models.Attribute, 1) print legendary_waffle.model_read(db, legendary_waffle.models.Attribute) # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ An error to represent bad things happening in Conch. Maintainer: Paul Swartz """ from twisted.cred.error import UnauthorizedLogin class ConchError(Exception): def __init__(self, value, data = None): Exception.__init__(self, value, data) self.value = value self.data = data class NotEnoughAuthentication(Exception): """ This is thrown if the authentication is valid, but is not enough to successfully verify the user. i.e. don't retry this type of authentication, try another one. """ class ValidPublicKey(UnauthorizedLogin): """ Raised by public key checkers when they receive public key credentials that don't contain a signature at all, but are valid in every other way. (e.g. the public key matches one in the user's authorized_keys file). Protocol code (eg L{SSHUserAuthServer}) which attempts to log in using L{ISSHPrivateKey} credentials should be prepared to handle a failure of this type by telling the user to re-authenticate using the same key and to include a signature with the new attempt. 
See U{http://www.ietf.org/rfc/rfc4252.txt} section 7 for more details. """ class IgnoreAuthentication(Exception): """ This is thrown to let the UserAuthServer know it doesn't need to handle the authentication anymore. """ class MissingKeyStoreError(Exception): """ Raised if an SSHAgentServer starts receiving data without its factory providing a keys dict on which to read/write key data. """ class UserRejectedKey(Exception): """ The user interactively rejected a key. """ class InvalidEntry(Exception): """ An entry in a known_hosts file could not be interpreted as a valid entry. """ class HostKeyChanged(Exception): """ The host key of a remote host has changed. @ivar offendingEntry: The entry which contains the persistent host key that disagrees with the given host key. @type offendingEntry: L{twisted.conch.interfaces.IKnownHostEntry} @ivar path: a reference to the known_hosts file that the offending entry was loaded from @type path: L{twisted.python.filepath.FilePath} @ivar lineno: The line number of the offending entry in the given path. @type lineno: L{int} """ def __init__(self, offendingEntry, path, lineno): Exception.__init__(self) self.offendingEntry = offendingEntry self.path = path self.lineno = lineno """ User Management Module This module reads the 'users.conf' file and gets all users's info. """ __all__ = ["UserMgr"] import ConfigParser class UserMgr: """User Manager The format of the user_info is: user_info = { "username": "maple", "password": "valley", "ethernet_interface": "eth0", "dhcp_command": "dhcpcd", "daemon": True, # following has not implemented yet "carry_version_info": True, "broadcast_logoff": False "packet_type": "unicast" } """ def __init__(self, path=None): if path is None: self.users_cfg_path = '/etc/yah3c.conf' else: self.users_cfg_path = path self.config = ConfigParser.ConfigParser() self.config.read(self.users_cfg_path) def save_and_reload(self): fp = open(self.users_cfg_path, 'w') self.config.write(fp) fp.close() self.config.read(self.users_cfg_path) def get_user_number(self): return len(self.config.sections()) def get_all_users_info(self): users_info = [] for username in self.config.sections(): user_info = dict(self.config.items(username)) user_info['username'] = username users_info.append(user_info) return users_info def get_user_info(self, username): user_info = dict(self.config.items(username)) user_info['username'] = username return user_info def add_user(self, user_info): self.config.add_section(user_info['username']) self.update_user_info(user_info) def remove_user(self, username): self.config.remove_section(username) self.save_and_reload() def update_user_info(self, user_info): self.config.set(user_info['username'], 'password', user_info['password']) self.config.set(user_info['username'], 'ethernet_interface', user_info['ethernet_interface']) self.config.set(user_info['username'], 'dhcp_command', user_info['dhcp_command']) self.config.set(user_info['username'], 'daemon', user_info['daemon']) self.save_and_reload() # Copyright 2010 Google Inc. 
# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.gs.user import User from boto.exception import InvalidAclError ACCESS_CONTROL_LIST = 'AccessControlList' ALL_AUTHENTICATED_USERS = 'AllAuthenticatedUsers' ALL_USERS = 'AllUsers' DISPLAY_NAME = 'DisplayName' DOMAIN = 'Domain' EMAIL_ADDRESS = 'EmailAddress' ENTRY = 'Entry' ENTRIES = 'Entries' GROUP_BY_DOMAIN = 'GroupByDomain' GROUP_BY_EMAIL = 'GroupByEmail' GROUP_BY_ID = 'GroupById' ID = 'ID' NAME = 'Name' OWNER = 'Owner' PERMISSION = 'Permission' SCOPE = 'Scope' TYPE = 'type' USER_BY_EMAIL = 'UserByEmail' USER_BY_ID = 'UserById' CannedACLStrings = ['private', 'public-read', 'project-private', 'public-read-write', 'authenticated-read', 'bucket-owner-read', 'bucket-owner-full-control'] """A list of Google Cloud Storage predefined (canned) ACL strings.""" SupportedPermissions = ['READ', 'WRITE', 'FULL_CONTROL'] """A list of supported ACL permissions.""" class ACL: def __init__(self, parent=None): self.parent = parent self.entries = [] @property def acl(self): return self def __repr__(self): # Owner is optional in GS ACLs. if hasattr(self, 'owner'): entries_repr = ['Owner:%s' % self.owner.__repr__()] else: entries_repr = [''] acl_entries = self.entries if acl_entries: for e in acl_entries.entry_list: entries_repr.append(e.__repr__()) return '<%s>' % ', '.join(entries_repr) # Method with same signature as boto.s3.acl.ACL.add_email_grant(), to allow # polymorphic treatment at application layer. def add_email_grant(self, permission, email_address): entry = Entry(type=USER_BY_EMAIL, email_address=email_address, permission=permission) self.entries.entry_list.append(entry) # Method with same signature as boto.s3.acl.ACL.add_user_grant(), to allow # polymorphic treatment at application layer. 
def add_user_grant(self, permission, user_id): entry = Entry(permission=permission, type=USER_BY_ID, id=user_id) self.entries.entry_list.append(entry) def add_group_email_grant(self, permission, email_address): entry = Entry(type=GROUP_BY_EMAIL, email_address=email_address, permission=permission) self.entries.entry_list.append(entry) def add_group_grant(self, permission, group_id): entry = Entry(type=GROUP_BY_ID, id=group_id, permission=permission) self.entries.entry_list.append(entry) def startElement(self, name, attrs, connection): if name.lower() == OWNER.lower(): self.owner = User(self) return self.owner elif name.lower() == ENTRIES.lower(): self.entries = Entries(self) return self.entries else: return None def endElement(self, name, value, connection): if name.lower() == OWNER.lower(): pass elif name.lower() == ENTRIES.lower(): pass else: setattr(self, name, value) def to_xml(self): s = '<%s>' % ACCESS_CONTROL_LIST # Owner is optional in GS ACLs. if hasattr(self, 'owner'): s += self.owner.to_xml() acl_entries = self.entries if acl_entries: s += acl_entries.to_xml() s += '' % ACCESS_CONTROL_LIST return s class Entries: def __init__(self, parent=None): self.parent = parent # Entries is the class that represents the same-named XML # element. entry_list is the list within this class that holds the data. self.entry_list = [] def __repr__(self): entries_repr = [] for e in self.entry_list: entries_repr.append(e.__repr__()) return '' % ', '.join(entries_repr) def startElement(self, name, attrs, connection): if name.lower() == ENTRY.lower(): entry = Entry(self) self.entry_list.append(entry) return entry else: return None def endElement(self, name, value, connection): if name.lower() == ENTRY.lower(): pass else: setattr(self, name, value) def to_xml(self): s = '<%s>' % ENTRIES for entry in self.entry_list: s += entry.to_xml() s += '' % ENTRIES return s # Class that represents a single (Scope, Permission) entry in an ACL. class Entry: def __init__(self, scope=None, type=None, id=None, name=None, email_address=None, domain=None, permission=None): if not scope: scope = Scope(self, type, id, name, email_address, domain) self.scope = scope self.permission = permission def __repr__(self): return '<%s: %s>' % (self.scope.__repr__(), self.permission.__repr__()) def startElement(self, name, attrs, connection): if name.lower() == SCOPE.lower(): # The following if statement used to look like this: # if not TYPE in attrs: # which caused problems because older versions of the # AttributesImpl class in the xml.sax library neglected to include # a __contains__() method (which Python calls to implement the # 'in' operator). So when you use the in operator, like the if # statement above, Python invokes the __getiter__() method with # index 0, which raises an exception. More recent versions of # xml.sax include the __contains__() method, rendering the in # operator functional. The work-around here is to formulate the # if statement as below, which is the legal way to query # AttributesImpl for containment (and is also how the added # __contains__() method works). At one time gsutil disallowed # xmlplus-based parsers, until this more specific problem was # determined. 
if TYPE not in attrs: raise InvalidAclError('Missing "%s" in "%s" part of ACL' % (TYPE, SCOPE)) self.scope = Scope(self, attrs[TYPE]) return self.scope elif name.lower() == PERMISSION.lower(): pass else: return None def endElement(self, name, value, connection): if name.lower() == SCOPE.lower(): pass elif name.lower() == PERMISSION.lower(): value = value.strip() if not value in SupportedPermissions: raise InvalidAclError('Invalid Permission "%s"' % value) self.permission = value else: setattr(self, name, value) def to_xml(self): s = '<%s>' % ENTRY s += self.scope.to_xml() s += '<%s>%s' % (PERMISSION, self.permission, PERMISSION) s += '' % ENTRY return s class Scope: # Map from Scope type.lower() to lower-cased list of allowed sub-elems. ALLOWED_SCOPE_TYPE_SUB_ELEMS = { ALL_AUTHENTICATED_USERS.lower() : [], ALL_USERS.lower() : [], GROUP_BY_DOMAIN.lower() : [DOMAIN.lower()], GROUP_BY_EMAIL.lower() : [ DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()], GROUP_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()], USER_BY_EMAIL.lower() : [ DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()], USER_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()] } def __init__(self, parent, type=None, id=None, name=None, email_address=None, domain=None): self.parent = parent self.type = type self.name = name self.id = id self.domain = domain self.email_address = email_address if self.type.lower() not in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS: raise InvalidAclError('Invalid %s %s "%s" ' % (SCOPE, TYPE, self.type)) def __repr__(self): named_entity = None if self.id: named_entity = self.id elif self.email_address: named_entity = self.email_address elif self.domain: named_entity = self.domain if named_entity: return '<%s: %s>' % (self.type, named_entity) else: return '<%s>' % self.type def startElement(self, name, attrs, connection): if (not name.lower() in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type.lower()]): raise InvalidAclError('Element "%s" not allowed in %s %s "%s" ' % (name, SCOPE, TYPE, self.type)) return None def endElement(self, name, value, connection): value = value.strip() if name.lower() == DOMAIN.lower(): self.domain = value elif name.lower() == EMAIL_ADDRESS.lower(): self.email_address = value elif name.lower() == ID.lower(): self.id = value elif name.lower() == NAME.lower(): self.name = value else: setattr(self, name, value) def to_xml(self): s = '<%s type="%s">' % (SCOPE, self.type) if (self.type.lower() == ALL_AUTHENTICATED_USERS.lower() or self.type.lower() == ALL_USERS.lower()): pass elif self.type.lower() == GROUP_BY_DOMAIN.lower(): s += '<%s>%s' % (DOMAIN, self.domain, DOMAIN) elif (self.type.lower() == GROUP_BY_EMAIL.lower() or self.type.lower() == USER_BY_EMAIL.lower()): s += '<%s>%s' % (EMAIL_ADDRESS, self.email_address, EMAIL_ADDRESS) if self.name: s += '<%s>%s' % (NAME, self.name, NAME) elif (self.type.lower() == GROUP_BY_ID.lower() or self.type.lower() == USER_BY_ID.lower()): s += '<%s>%s' % (ID, self.id, ID) if self.name: s += '<%s>%s' % (NAME, self.name, NAME) else: raise InvalidAclError('Invalid scope type "%s" ', self.type) s += '' % SCOPE return s # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. # All rights reserved. 
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""
Check that all of the certs on SNS endpoints validate.
"""
import unittest

from tests.integration import ServiceCertVerificationTest

import boto.sns


class SNSCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    sns = True
    regions = boto.sns.regions()

    def sample_service_call(self, conn):
        conn.get_all_topics()


"""This is a repository for items in AGE"""
import csv

__author__ = "Grant Colasurdo"


class Items:
    """This class manages the meta-info of a character's items"""

    def __init__(self, character):
        self.character = character


class Item:
    def __init__(self, csv_row):
        self.item_name: str = csv_row['item_name']
        self.weight: float = csv_row['weight']
        self.size: float = csv_row['size']
        self.value: float = csv_row['value']
        self.hands_to_wield: int = csv_row['hands_to_wield']


class Wear:
    """Handle the meta data for clothing worn by a character

    Notes
    -----
    Attributes link to places that clothes can be worn. Some positions have
    3 layers represented by placement on a list.
    [Under, Normal, Over]
    """

    def __init__(self, character):
        self.character = character
        self.armor: WearLocation = WearLocation(self, "Armor", (False, False, True))
        self.feet: WearLocation = WearLocation(self, "Feet", (True, True, True))
        self.legs: WearLocation = WearLocation(self, "Legs", (True, True, True))
        self.waist: WearLocation = WearLocation(self, "Waist", (False, True, False))
        self.torso: WearLocation = WearLocation(self, "Torso", (True, True, True))
        self.head: WearLocation = WearLocation(self, "Head", (False, True, True))
        self.face: WearLocation = WearLocation(self, "Face", (False, True, False))
        self.hands: WearLocation = WearLocation(self, "Hands", (False, True, False))
        self.back: WearLocation = WearLocation(self, "Back", (False, True, False))
        self.location_list = {'armor', 'feet', 'legs', 'waist', 'torso',
                              'head', 'face', 'hands', 'back'}

    @property
    def worn_weight(self):
        # a generator, not a set: equal weights must not be deduplicated
        return sum(self.__dict__[location].weight
                   for location in self.location_list)


class WearSlot:
    """Each wear slot can hold 1 clothing or armor item"""

    def __init__(self, wear_location: 'WearLocation'):
        self.wear_slot = wear_location
        self._item = None  # the item currently held in this slot
        self.enabled = False  # if the slot is disabled, no item is assignable

    @property
    def item(self):
        return self._item

    @item.setter
    def item(self, value: Item):
        if self.enabled:
            self._item = value
        else:
            print("This equipment slot is disabled")

    @property
    def weight(self) -> int:
        if self.item is None:
            return 0
        else:
            return self.item.weight


class WearLocation:
    """A position on the body that can be equipped with wearables"""

    def __init__(self, wear: Wear, location_name, enabled_slots):
        self.wear_root = wear
        self.location_name = location_name
        self.under_slot = WearSlot(self)
        self.middle_slot = WearSlot(self)
        self.over_slot = WearSlot(self)
        self.under_slot.enabled = enabled_slots[0]
        self.middle_slot.enabled = enabled_slots[1]
        self.over_slot.enabled = enabled_slots[2]

    @property
    def weight(self):
        return sum((self.under_slot.weight, self.middle_slot.weight,
                    self.over_slot.weight))
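

# Hedged usage sketch (added for illustration; not in the original file):
# equipping a slot and aggregating worn weight. The csv_row dict is a
# hypothetical stand-in for a row of items.csv.
def _example_wear_usage():
    shirt = Item({'item_name': 'Shirt', 'weight': 1.0, 'size': 1.0,
                  'value': 2.0, 'hands_to_wield': 0})
    wear = Wear(character=None)
    wear.torso.middle_slot.item = shirt  # the torso middle layer is enabled
    return wear.worn_weight  # sums the slot weights of every location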
class Currency(Item):
    def __init__(self, csv_row):
        super().__init__(csv_row)


class Container(Item):
    def __init__(self, csv_row):
        super(Container, self).__init__(csv_row)
        self._weight = None
        self.items = set()
        self.volume_capacity = csv_row['volume_capacity']
        self.weight_capacity = csv_row['weight_capacity']
        self.lock = None

    @property
    def weight(self) -> float:
        contained_weight = sum([item.weight for item in self.items])
        return contained_weight + self.self_weight

    @weight.setter
    def weight(self, value: float):
        self._weight = value

    @property
    def self_weight(self) -> float:
        return self._weight

    @self_weight.setter
    def self_weight(self, value: float):
        self._weight = value

    @property
    def remaining_weight(self):
        return self.weight_capacity - self.weight

    @property
    def occupied_space(self):
        return sum([item.size for item in self.items])

    @property
    def remaining_space(self):
        return self.volume_capacity - self.occupied_space

    def insert(self, item: Item):
        try:
            assert self.remaining_space >= item.size
            self.items.add(item)
        except AssertionError:
            print("There is not enough space or spare weight in the "
                  "container to add")

    def remove(self, item: Item):
        try:
            assert item in self.items
            self.items.remove(item)
        except AssertionError:
            print("That item is not in the container")


class Weapon(Item):
    def __init__(self, csv_row):
        super().__init__(csv_row)
        self.damage_rolls = csv_row['weapon_damage']
        self.weapon_group = csv_row['weapon_group']
        self.minimum_strength = csv_row['minimum_strength']
        self.short_range = csv_row['short_range']
        self.long_range = csv_row['maximum_range']
        self.minimum_range = csv_row['minimum_range']


class Missile(Weapon):
    def __init__(self, csv_row):
        self.missile_used = None
        super().__init__(csv_row)


class Armor(Item):
    def __init__(self, csv_row):
        self.rating = csv_row['armor_rating']
        self.weight_class = csv_row['armor_weight_class']
        self.penalty = csv_row['armor_penalty']
        self.strain = csv_row['armor_strain']
        super().__init__(csv_row)


class Shield(Item):
    def __init__(self, csv_row):
        self.weight_class = csv_row['armor_weight_class']
        self.defense_modifier = csv_row['shield_bonus']
        super().__init__(csv_row)


class Lock(Item):
    def __init__(self, csv_row):
        self.is_locked = None
        super().__init__(csv_row)


class Tool(Item):
    def __init__(self, csv_row):
        super(Tool, self).__init__(csv_row)


class Traveling(Item):
    def __init__(self, csv_row):
        super(Traveling, self).__init__(csv_row)


class Clothing(Item):
    def __init__(self, csv_row):
        super(Clothing, self).__init__(csv_row)


class TradeGoods(Item):
    def __init__(self, csv_row):
        super(TradeGoods, self).__init__(csv_row)


class ProfessionalGear(Item):
    def __init__(self, csv_row):
        super(ProfessionalGear, self).__init__(csv_row)


class HomeAndHearth(Item):
    def __init__(self, csv_row):
        super(HomeAndHearth, self).__init__(csv_row)


class FoodAndLodging(Item):
    def __init__(self, csv_row):
        super(FoodAndLodging, self).__init__(csv_row)


class Equipment:
    """This will manage the meta level information for the items used in
    combat for a character"""

    def __init__(self, character):
        self.character = character
        self.primary_hand = None  # Link to an item that the primary hand is holding
        self.secondary_hand = None  # Link to an item that the secondary hand is holding
        self._backpack = None  # Link to an item that is worn on the character's back

    @property
    def armor_value(self) -> int:
        """The amount of protection your armor affords you"""
        return self.character.wear.armor.over_slot.item.rating

    @property
    def armor_penalty(self) -> int:
        """The penalty applied to speed and Dexterity if untrained in the
        armor class"""
        return self.armor.penalty

    @property
    def armor_strain(self):
        """The penalty applied to magic rolls"""
        return self.armor.strain

    @property
    def armor(self) -> Armor:
        """Return the armor object being worn by the character"""
        return self.character.wear.armor.over_slot.item

    @armor.setter
    def armor(self, value: Armor):
        self.character.wear.armor.over_slot.item = value

    @property
    def shield_bonus(self):
        """Return the bonus to defense gained from having a shield"""
        bonus_value = 0
        try:
            bonus_value = max(self.primary_hand.defense_modifier, bonus_value)
        except AttributeError:
            pass
        try:
            bonus_value = max(self.secondary_hand.defense_modifier, bonus_value)
        except AttributeError:
            pass
        return bonus_value

    @property
    def backpack(self):
        """Return the backpack item worn by the character"""
        return self.character.wear.back.middle_slot.item

    @backpack.setter
    def backpack(self, value: Container):
        self.character.wear.back.middle_slot.item = value


ITEM_CLASS_DICT = {
    "Currency": Currency,
    "Container": Container,
    "Item": Item,
    "Lock": Lock,
    "Tool": Tool,
    "Missile": Missile,
    "Traveling": Traveling,
    "Clothing": Clothing,
    "Trade Goods": TradeGoods,
    "Professional Gear": ProfessionalGear,
    "Home and Hearth": HomeAndHearth,
    "Food and Lodging": FoodAndLodging,
    "Weapon": Weapon,
    "Armor": Armor,
    "Shield": Shield
}
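

# Hedged sketch (added for illustration; not in the original file):
# ITEM_CLASS_DICT drives a simple factory -- the 'item_class' column in
# items.csv names the class to instantiate for each row. The column values
# below are hypothetical.
def _example_item_dispatch():
    row = {'item_name': 'Longsword', 'weight': 3.0, 'size': 2.0,
           'value': 15.0, 'hands_to_wield': 1, 'item_class': 'Weapon',
           'weapon_damage': '2d6', 'weapon_group': 'Heavy Blades',
           'minimum_strength': 1, 'short_range': None,
           'maximum_range': None, 'minimum_range': None}
    return ITEM_CLASS_DICT[row['item_class']](row)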
def new_item(item_name) -> Item:
    item = None
    with open('items.csv', 'r') as file:
        item_db = csv.DictReader(file)
        for row in item_db:
            if row['item_name'] == item_name:
                item_class = row['item_class']
                class_to_call = ITEM_CLASS_DICT[item_class]
                item = class_to_call(row)
    return item


def init_items(character):
    character.equipment = Equipment(character)
    character.items = Items(character)
    starting_items = set()
    character.wear.back.middle_slot.item = new_item("Backpack")
    character.wear.torso.under_slot.item = new_item("Underclothes")
    character.wear.feet.middle_slot.item = new_item("Boots")
    character.wear.waist.middle_slot.item = new_item("Belt")
    character.wear.legs.middle_slot.item = new_item("Pants")
    character.wear.torso.middle_slot.item = new_item("Shirt")
    character.wear.torso.over_slot.item = new_item("Jacket")
    starting_items.add(new_item("Waterskin"))


# -*- coding: utf-8 -*-

"""Config."""


#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------

import logging
import os
import os.path as op
from textwrap import dedent

from traitlets.config import (Config,
                              PyFileConfigLoader,
                              JSONFileConfigLoader,
                              )

logger = logging.getLogger(__name__)


#------------------------------------------------------------------------------
# Config
#------------------------------------------------------------------------------

def phy_config_dir():
    """Return the absolute path to the phy user directory."""
    home = op.realpath(op.expanduser('~'))
    return op.join(home, '.phy')


def _ensure_dir_exists(path):
    """Ensure a directory exists."""
    if not op.exists(path):
        os.makedirs(path)
    assert op.exists(path) and op.isdir(path)


def load_config(path=None):
    """Load a Python or JSON config file."""
    if not path or not op.exists(path):
        return Config()
    path = op.realpath(path)
    dirpath, filename = op.split(path)
    file_ext = op.splitext(path)[1]
    logger.debug("Load config file `%s`.", path)
    if file_ext == '.py':
        config = PyFileConfigLoader(filename, dirpath,
                                    log=logger).load_config()
    elif file_ext == '.json':
        config = JSONFileConfigLoader(filename, dirpath,
                                      log=logger).load_config()
    return config


def _default_config(config_dir=None):
    path = op.join(config_dir or op.join('~', '.phy'), 'plugins/')
    return dedent("""
    # You can also put your plugins in ~/.phy/plugins/.

    from phy import IPlugin

    try:
        import phycontrib
    except:
        pass

    # Plugin example:
    #
    # class MyPlugin(IPlugin):
    #     def attach_to_cli(self, cli):
    #         # you can create phy subcommands here with click
    #         pass

    c = get_config()
    c.Plugins.dirs = [r'{}']
    """.format(path))


def load_master_config(config_dir=None):
    """Load a master Config file from `~/.phy/phy_config.py`."""
    config_dir = config_dir or phy_config_dir()
    path = op.join(config_dir, 'phy_config.py')
    # Create a default config file if necessary.
    if not op.exists(path):
        _ensure_dir_exists(op.dirname(path))
        logger.debug("Creating default phy config file at `%s`.", path)
        with open(path, 'w') as f:
            f.write(_default_config(config_dir=config_dir))
    assert op.exists(path)
    return load_config(path)


def save_config(path, config):
    """Save a config object to a JSON file."""
    import json
    config['version'] = 1
    with open(path, 'w') as f:
        json.dump(config, f)


import os
import sys
import random
import unittest

sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test

repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions


class SendKeysTest(base_test.WebDriverBaseTest):
    def setUp(self):
        self.driver.get(self.webserver.where_is("user_input/res/text-form.html"))

    def test_send_simple_string(self):
        element = self.driver.find_element_by_id("Text1")
        element.send_keys("lorem ipsum")
        self.assertEquals(self.driver.find_element_by_id("text").get_text(),
                          u"lorem ipsum")

    def test_send_return(self):
        element = self.driver.find_element_by_id("Text1")
        returnkey = unichr(int("E006", 16))
        element.send_keys([returnkey])
        self.assertEquals(
            u"" + self.driver.get_current_url(),
            u"" + self.webserver.where_is("user_input/res/text-form-landing.html?e=mc2"))

    def test_send_backspace(self):
        element = self.driver.find_element_by_id("Text1")
        element.send_keys("world ")
        element.send_keys("wide ")
        element.send_keys("web ")
        element.send_keys("consortium")
        backspace = unichr(int("E003", 16))
        for i in range(0, 11):
            element.send_keys([backspace])
        self.assertEquals(self.driver.find_element_by_id("text").get_text(),
                          u"world wide web")

    def test_send_tab(self):
        element1 = self.driver.find_element_by_id("Text1")
        element2 = self.driver.find_element_by_id("Text2")
        element1.send_keys("typing here")
        tab = unichr(int("E004", 16))
        element1.send_keys([tab])
        output = self.driver.find_element_by_id("output")
        tab_pressed = output.get_attribute("checked")
        self.assertEquals(tab_pressed, u"true")

    def test_send_shift(self):
        element = self.driver.find_element_by_id("Text1")
        element.send_keys("low ")
        shift = unichr(int("E008", 16))
        element.send_keys([shift, "u", "p", shift])
        self.assertEquals(self.driver.find_element_by_id("text").get_text(),
                          u"low UP")

    def test_send_arrow_keys(self):
        element = self.driver.find_element_by_id("Text1")
        element.send_keys("internet")
        backspace = unichr(int("E003", 16))
        left = unichr(int("E012", 16))
        right = unichr(int("E014", 16))
        for i in range(0, 4):
            element.send_keys([left])
        element.send_keys([backspace])
        element.send_keys([right])
        element.send_keys("a")
        self.assertEquals(self.driver.find_element_by_id("text").get_text(),
                          u"intranet")

    def test_select_text_with_shift(self):
        element = self.driver.find_element_by_id("Text1")
        element.send_keys("WebDriver")
        backspace = unichr(int("E003", 16))
        shift = unichr(int("E008", 16))
        left = unichr(int("E012", 16))
        element.send_keys([shift, left, left, left, left, left, left, backspace])
        self.assertEquals(self.driver.find_element_by_id("text").get_text(),
                          u"Web")


if __name__ == "__main__":
    unittest.main()


import MySQLdb
from MySQLdb.connections import *


class DirectMySQLdbConnection(Connection):
    ### DCR: from MySQLdb connections.py Connection.__init__
    def __init__(self, *args, **kwargs):
        ### DCR: fixed up relative imports
        from MySQLdb.constants import CLIENT, FIELD_TYPE
        from MySQLdb.converters import conversions
        from weakref import proxy, WeakValueDictionary
        import types

        kwargs2 = kwargs.copy()
        conv = kwargs.get('conv', conversions)
        kwargs2['conv'] = dict([(k, v) for k, v in conv.items()
                                if type(k) is int])

        self.cursorclass = kwargs2.pop('cursorclass', self.default_cursor)
        charset = kwargs2.pop('charset', '')

        if charset:
            use_unicode = True
        else:
            use_unicode = False

        use_unicode = kwargs2.pop('use_unicode', use_unicode)
        sql_mode = kwargs2.pop('sql_mode', '')

        client_flag = kwargs.get('client_flag', 0)
        ### DCR: fixed up module reference
        client_version = tuple(
            [int(n) for n in
             MySQLdb.connections._mysql.get_client_info().split('.')[:2]])
        if client_version >= (4, 1):
            client_flag |= CLIENT.MULTI_STATEMENTS
        if client_version >= (5, 0):
            client_flag |= CLIENT.MULTI_RESULTS

        kwargs2['client_flag'] = client_flag

        ### DCR: skip over the Connection __init__
        #super(Connection, self).__init__(*args, **kwargs2)
        MySQLdb._mysql.connection.__init__(self, *args, **kwargs2)

        self.encoders = dict([(k, v) for k, v in conv.items()
                              if type(k) is not int])

        self._server_version = tuple(
            [int(n) for n in self.get_server_info().split('.')[:2]])

        db = proxy(self)

        ### DCR: these functions create memory leaks with gc.DEBUG_SAVEALL
        ### turned on
        """
        def _get_string_literal():
            def string_literal(obj, dummy=None):
                return db.string_literal(obj)
            return string_literal

        def _get_unicode_literal():
            def unicode_literal(u, dummy=None):
                return db.literal(u.encode(unicode_literal.charset))
            return unicode_literal

        def _get_string_decoder():
            def string_decoder(s):
                return s.decode(string_decoder.charset)
            return string_decoder
        """
        ### DCR: use methods rather than inline-defined functions to prevent
        ### memory leaks
        string_literal = self._get_string_literal(db)
        self.unicode_literal = unicode_literal = self._get_unicode_literal(db)
        self.string_decoder = string_decoder = self._get_string_decoder()
        if not charset:
            charset = self.character_set_name()
        self.set_character_set(charset)

        if sql_mode:
            self.set_sql_mode(sql_mode)

        if use_unicode:
            self.converter[FIELD_TYPE.STRING].insert(-1, (None, string_decoder))
            self.converter[FIELD_TYPE.VAR_STRING].insert(-1, (None, string_decoder))
            self.converter[FIELD_TYPE.BLOB].insert(-1, (None, string_decoder))

        self.encoders[types.StringType] = string_literal
        self.encoders[types.UnicodeType] = unicode_literal

        self._transactional = self.server_capabilities & CLIENT.TRANSACTIONS
        if self._transactional:
            # PEP-249 requires autocommit to be initially off
            self.autocommit(False)
        self.messages = []

    ### DCR: make inline-defined functions into member methods to avoid
    ### garbage
    def _string_literal(self, db, obj, dummy=None):
        return db.string_literal(obj)

    def _get_string_literal(self, db):
        return Functor(self._string_literal, db)

    def _unicode_literal(self, db, u, dummy=None):
        # the charset attribute is set by set_character_set() on the Functor
        # stored as self.unicode_literal
        return db.literal(u.encode(self.unicode_literal.charset))

    def _get_unicode_literal(self, db):
        return Functor(self._unicode_literal, db)

    def _string_decoder(self, s):
        return s.decode(self.string_decoder.charset)

    def _get_string_decoder(self):
        # make it into a Functor since MySQLdb.connections.Connection wants
        # to set attributes on its string_decoder
        return Functor(self._string_decoder)

    def close(self):
        Connection.close(self)
        # break garbage cycles
        self.unicode_literal = None
        self.string_decoder = None
        self.encoders = None
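

# The class above relies on a `Functor` helper that is not defined in this
# file. A minimal sketch of what it plausibly looks like (an attribute-
# bearing callable partial, since MySQLdb sets e.g. `.charset` on the
# string_decoder it is handed) -- an assumption, not the original helper:
class Functor(object):
    def __init__(self, func, *args):
        self.func = func
        self.args = args

    def __call__(self, *args, **kwargs):
        # prepend the bound arguments, like functools.partial
        return self.func(*(self.args + args), **kwargs)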
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

import traceback

try:
    import ovirtsdk4.types as otypes
except ImportError:
    pass

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
    BaseModule,
    check_sdk,
    check_params,
    create_connection,
    equal,
    ovirt_full_argument_spec,
    search_by_name,
)


ANSIBLE_METADATA = {'status': 'preview',
                    'supported_by': 'community',
                    'version': '1.0'}

DOCUMENTATION = '''
---
module: ovirt_datacenters
short_description: Module to manage data centers in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
    - "Module to manage data centers in oVirt"
options:
    name:
        description:
            - "Name of the data center to manage."
        required: true
    state:
        description:
            - "Should the data center be present or absent"
        choices: ['present', 'absent']
        default: present
    description:
        description:
            - "Description of the data center."
    comment:
        description:
            - "Comment of the data center."
    local:
        description:
            - "I(True) if the data center should be local, I(False) if should be shared."
            - "Default value is set by engine."
    compatibility_version:
        description:
            - "Compatibility version of the data center."
    quota_mode:
        description:
            - "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
        choices: ['disabled', 'audit', 'enabled']
    mac_pool:
        description:
            - "MAC pool to be used by this datacenter."
            - "IMPORTANT: This option is deprecated in oVirt 4.1. You should
               use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are
               set per cluster since 4.1."
extends_documentation_fragment: ovirt
'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

# Create datacenter
- ovirt_datacenters:
    name: mydatacenter
    local: True
    compatibility_version: 4.0
    quota_mode: enabled

# Remove datacenter
- ovirt_datacenters:
    state: absent
    name: mydatacenter
'''

RETURN = '''
id:
    description: "ID of the managed datacenter"
    returned: "On success if datacenter is found."
    type: str
    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
data_center:
    description: "Dictionary of all the datacenter attributes. Datacenter
                  attributes can be found on your oVirt instance at following
                  url: https://ovirt.example.com/ovirt-engine/api/model#types/datacenter."
    returned: "On success if datacenter is found."
'''


class DatacentersModule(BaseModule):

    def __get_major(self, full_version):
        if full_version is None:
            return None
        if isinstance(full_version, otypes.Version):
            return full_version.major
        return int(full_version.split('.')[0])

    def __get_minor(self, full_version):
        if full_version is None:
            return None
        if isinstance(full_version, otypes.Version):
            return full_version.minor
        return int(full_version.split('.')[1])

    def _get_mac_pool(self):
        mac_pool = None
        if self._module.params.get('mac_pool'):
            mac_pool = search_by_name(
                self._connection.system_service().mac_pools_service(),
                self._module.params.get('mac_pool'),
            )
        return mac_pool

    def build_entity(self):
        return otypes.DataCenter(
            name=self._module.params['name'],
            comment=self._module.params['comment'],
            description=self._module.params['description'],
            mac_pool=otypes.MacPool(
                id=getattr(self._get_mac_pool(), 'id', None),
            ) if self._module.params.get('mac_pool') else None,
            quota_mode=otypes.QuotaModeType(
                self._module.params['quota_mode']
            ) if self._module.params['quota_mode'] else None,
            local=self._module.params['local'],
            version=otypes.Version(
                major=self.__get_major(self._module.params['compatibility_version']),
                minor=self.__get_minor(self._module.params['compatibility_version']),
            ) if self._module.params['compatibility_version'] else None,
        )

    def update_check(self, entity):
        minor = self.__get_minor(self._module.params.get('compatibility_version'))
        major = self.__get_major(self._module.params.get('compatibility_version'))
        return (
            equal(getattr(self._get_mac_pool(), 'id', None),
                  getattr(entity.mac_pool, 'id', None)) and
            equal(self._module.params.get('comment'), entity.comment) and
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
            equal(self._module.params.get('local'), entity.local) and
            equal(minor, self.__get_minor(entity.version)) and
            equal(major, self.__get_major(entity.version))
        )


def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        description=dict(default=None),
        local=dict(type='bool'),
        compatibility_version=dict(default=None),
        quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
        comment=dict(default=None),
        mac_pool=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    try:
        connection = create_connection(module.params.pop('auth'))
        data_centers_service = connection.system_service().data_centers_service()
        clusters_module = DatacentersModule(
            connection=connection,
            module=module,
            service=data_centers_service,
        )
        state = module.params['state']
        if state == 'present':
            ret = clusters_module.create()
        elif state == 'absent':
            ret = clusters_module.remove()
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=False)


if __name__ == "__main__":
    main()


try:
    # Python compat < 3.3
    from collections.abc import Set, MutableSet, Hashable, Iterable
except ImportError:
    from collections import Set, MutableSet, Hashable, Iterable
from collections import defaultdict
from functools import reduce
from itertools import chain
import operator

try:
    # Python compat < 3.4
    from functools import partialmethod
except ImportError:
    from .common import partialmethod


class _base_multiset(Set):
    def __init__(self, items=None):
        self.__bag = {}
        if isinstance(items, Iterable):
            for i in items:
                self.__bag[i] = self.__bag.get(i, 0) + 1

    def __contains__(self, item):
        return self.__bag.get(item, 0) > 0

    def __len__(self):
        return sum(self.__bag.values())

    def __iter__(self):
        for item in self.__bag:
            for _ in range(self.__bag[item]):
                yield item

    def __le__(self, other):
        if not isinstance(other, _base_multiset) or isinstance(other, _orderable_mixin):
            raise NotImplementedError()
        return all((self.count(i) <= other.count(i)) for i in self.__bag)

    def __eq__(self, other):
        if not isinstance(other, _base_multiset):
            raise NotImplementedError()
        return all((self.count(i) == other.count(i))
                   for i in chain(self.__bag, other.__bag))

    def __lt__(self, other):
        return (self <= other) and not (self == other)

    def __gt__(self, other):
        if not isinstance(other, _base_multiset):
            raise NotImplementedError()
        return other < self

    def __ge__(self, other):
        if not isinstance(other, _base_multiset):
            raise NotImplementedError()
        return other <= self

    def __combine(self, amnt_op, this_op, other):
        if isinstance(other, _base_multiset):
            result = self.__class__()
            for element in chain(self.__bag, other.__bag):
                amount = amnt_op(self.count(element), other.count(element))
                if amount > 0:
                    result.__bag[element] = amount
            return result
        if isinstance(other, Iterable):
            return this_op(self, self.__class__(other))
        raise NotImplementedError()

    __sub__ = partialmethod(__combine, operator.sub, operator.sub)
    __add__ = partialmethod(__combine, operator.add, operator.add)
    __or__ = partialmethod(__combine, max, operator.or_)
    __and__ = partialmethod(__combine, min, operator.and_)
    __xor__ = partialmethod(__combine, lambda l, r: abs(l - r), operator.xor)

    def count(self, item):
        return self.__bag.get(item, 0)

    def items(self):
        return self.__bag.items()
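

# Hedged sketch (added for illustration; not in the original file): the bag
# semantics of the class above -- element counts, length, and the
# element-wise combine operators (min for &, max for |, sum for +).
def _example_multiset_ops():
    a = multiset('aab')  # counts: {'a': 2, 'b': 1}
    b = multiset('abc')
    assert a.count('a') == 2 and len(a) == 3
    assert sorted(a & b) == ['a', 'b']                    # min of counts
    assert sorted(a | b) == ['a', 'a', 'b', 'c']          # max of counts
    assert sorted(a + b) == ['a', 'a', 'a', 'b', 'b', 'c']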


class _orderable_mixin(object):
    # Using the Dershowitz-Manna ordering that gives a well-founded ordering
    # on multisets if the given carrier is ordered (strings, integers, etc.)
    # This fails if the union of the sets that are compared has elements that
    # are incomparable
    # https://en.wikipedia.org/wiki/Dershowitz%E2%80%93Manna_ordering
    def __le__(self, other):
        if not (isinstance(other, _orderable_mixin)):
            raise NotImplementedError()
        # using definition by Huet and Oppen
        M, N = self.count, other.count
        S = frozenset(self | other)
        ys = (y for y in S if M(y) > N(y))
        return all(any((y < x and M(x) < N(x)) for x in S) for y in ys)

    def __lt__(self, other):
        if not (isinstance(other, _orderable_mixin)):
            raise NotImplementedError()
        return self != other and self <= other

    def __gt__(self, other):
        return not (self <= other)

    def __ge__(self, other):
        return not (self < other)


class multiset(_base_multiset, MutableSet):
    def add(self, item):
        self._base_multiset__bag[item] = self.count(item) + 1

    def discard(self, item):
        bag = self._base_multiset__bag
        if item in bag:
            bag[item] = bag[item] - 1
            if bag[item] == 0:
                del bag[item]


class frozenmultiset(_base_multiset, Hashable):
    def __hash__(self):
        from operator import xor
        pots = (hash(key)**value for (key, value) in self.items())
        return reduce(xor, pots, hash(())) ^ hash(self.__class__)


class orderable_multiset(_orderable_mixin, multiset):
    pass


class orderable_frozenmultiset(_orderable_mixin, frozenmultiset):
    pass


class nestable_orderable_frozenmultiset(orderable_frozenmultiset):
    # Natural multiset extension for nested multisets over an orderable
    # carrier again gives a well-founded total ordering
    def __gt__(self, other):
        if not isinstance(other, self.__class__):
            return True
        return super(self.__class__, self).__gt__(other)

    def __ge__(self, other):
        if not isinstance(other, self.__class__):
            return True
        return super(self.__class__, self).__ge__(other)

    def __lt__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return super(self.__class__, self).__lt__(other)

    def __le__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return super(self.__class__, self).__le__(other)


"""
Classes representing uploaded files.
"""

import errno
import os
from io import BytesIO

from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import force_str

__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
           'SimpleUploadedFile')


class UploadedFile(File):
    """
    An abstract uploaded file (``TemporaryUploadedFile`` and
    ``InMemoryUploadedFile`` are the built-in concrete subclasses).

    An ``UploadedFile`` object behaves somewhat like a file object and
    represents some file data that the user submitted with a form.
    """
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10

    def __init__(self, file=None, name=None, content_type=None, size=None,
                 charset=None, content_type_extra=None):
        super(UploadedFile, self).__init__(file, name)
        self.size = size
        self.content_type = content_type
        self.charset = charset
        self.content_type_extra = content_type_extra

    def __repr__(self):
        return force_str("<%s: %s (%s)>" % (
            self.__class__.__name__, self.name, self.content_type))

    def _get_name(self):
        return self._name

    def _set_name(self, name):
        # Sanitize the file name so that it can't be dangerous.
        if name is not None:
            # Just use the basename of the file -- anything else is
            # dangerous.
            name = os.path.basename(name)
            # File names longer than 255 characters can cause problems on
            # older OSes.
            if len(name) > 255:
                name, ext = os.path.splitext(name)
                ext = ext[:255]
                name = name[:255 - len(ext)] + ext

        self._name = name

    name = property(_get_name, _set_name)


class TemporaryUploadedFile(UploadedFile):
    """
    A file uploaded to a temporary location (i.e. stream-to-disk).
    """
    def __init__(self, name, content_type, size, charset,
                 content_type_extra=None):
        if settings.FILE_UPLOAD_TEMP_DIR:
            file = tempfile.NamedTemporaryFile(suffix='.upload',
                                               dir=settings.FILE_UPLOAD_TEMP_DIR)
        else:
            file = tempfile.NamedTemporaryFile(suffix='.upload')
        super(TemporaryUploadedFile, self).__init__(file, name, content_type,
                                                    size, charset,
                                                    content_type_extra)

    def temporary_file_path(self):
        """
        Returns the full path of this file.
        """
        return self.file.name

    def close(self):
        try:
            return self.file.close()
        except OSError as e:
            if e.errno != errno.ENOENT:
                # Means the file was moved or deleted before the tempfile
                # could unlink it. Still sets self.file.close_called and
                # calls self.file.file.close() before the exception
                raise


class InMemoryUploadedFile(UploadedFile):
    """
    A file uploaded into memory (i.e. stream-to-memory).
    """
    def __init__(self, file, field_name, name, content_type, size, charset,
                 content_type_extra=None):
        super(InMemoryUploadedFile, self).__init__(file, name, content_type,
                                                   size, charset,
                                                   content_type_extra)
        self.field_name = field_name

    def open(self, mode=None):
        self.file.seek(0)

    def chunks(self, chunk_size=None):
        self.file.seek(0)
        yield self.read()

    def multiple_chunks(self, chunk_size=None):
        # Since it's in memory, we'll never have multiple chunks.
        return False


class SimpleUploadedFile(InMemoryUploadedFile):
    """
    A simple representation of a file, which just has content, size, and a
    name.
    """
    def __init__(self, name, content, content_type='text/plain'):
        content = content or b''
        super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
                                                 content_type, len(content),
                                                 None, None)

    @classmethod
    def from_dict(cls, file_dict):
        """
        Creates a SimpleUploadedFile object from a dictionary object with the
        following keys:
           - filename
           - content-type
           - content
        """
        return cls(file_dict['filename'],
                   file_dict['content'],
                   file_dict.get('content-type', 'text/plain'))
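

# Hedged usage sketch (added for illustration; not in the original file):
# the typical test-suite use of SimpleUploadedFile -- wrap raw bytes so that
# form/upload code can treat them like a submitted file.
def _example_simple_upload():
    f = SimpleUploadedFile('hello.txt', b'hello world',
                           content_type='text/plain')
    return f.name, f.size, f.read()  # ('hello.txt', 11, b'hello world')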
raise IOError("%s not found" % img) return body def valid_image(): return get_image(VALID_IMAGE_PATH) def svg_image(): return get_image(SVG_IMAGE_PATH) def too_small_image(): return get_image(TOO_SMALL_IMAGE_PATH) def too_heavy_image(): return get_image(TOO_HEAVY_IMAGE_PATH) def default_image(): return get_image(DEFAULT_IMAGE_PATH) def alabama1(): return get_image(ALABAMA1_IMAGE_PATH) def space_image(): return get_image(SPACE_IMAGE_PATH) def invalid_quantization(): return get_image(INVALID_QUANTIZATION_IMAGE_PATH) def animated_image(): return get_image(ANIMATED_IMAGE_PATH) def not_so_animated_image(): return get_image(NOT_SO_ANIMATED_IMAGE_PATH) #!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from libcloud.loadbalancer.base import Member, Algorithm from libcloud.loadbalancer.types import Provider, State from libcloud.loadbalancer.providers import get_driver def main(): cls = get_driver(Provider.RACKSPACE) driver = cls('username', 'api key', region='ord') balancers = driver.list_balancers() print(balancers) # creating a balancer which balances traffic across two # nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer # itself listens on port 80/tcp new_balancer_name = 'testlb' + os.urandom(4).encode('hex') members = (Member(None, '192.168.86.1', 80), Member(None, '192.168.86.2', 8080)) new_balancer = driver.create_balancer(name=new_balancer_name, algorithm=Algorithm.ROUND_ROBIN, port=80, protocol='http', members=members) print(new_balancer) # wait for balancer to become ready # NOTE: in real life code add timeout to not end up in # endless loop when things go wrong on provider side while True: balancer = driver.get_balancer(balancer_id=new_balancer.id) if balancer.state == State.RUNNING: break print('sleeping for 30 seconds for balancers to become ready') time.sleep(30) # fetch list of members members = balancer.list_members() print(members) # remove first member balancer.detach_member(members[0]) # remove the balancer driver.destroy_balancer(new_balancer) if __name__ == '__main__': main() import json from django.conf import settings from django.contrib.auth.decorators import login_required from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import render_to_response from django.core.serializers.json import DjangoJSONEncoder from django.test.client import CONTENT_TYPE_RE from django.template import RequestContext class CustomTestException(Exception): pass def no_template_view(request): "A simple view that expects a GET request, and returns a rendered template" return HttpResponse("No template used. Sample content: twice once twice. Content ends.") def staff_only_view(request): "A view that can only be visited by staff. 
    "A view that can only be visited by staff. Non-staff members get an exception"
    if request.user.is_staff:
        return HttpResponse('')
    else:
        raise CustomTestException()


def get_view(request):
    "A simple login protected view"
    return HttpResponse("Hello world")
get_view = login_required(get_view)


def request_data(request, template='base.html', data='sausage'):
    "A simple view that returns the request data in the context"
    return render_to_response(template, {
        'get-foo': request.GET.get('foo', None),
        'get-bar': request.GET.get('bar', None),
        'post-foo': request.POST.get('foo', None),
        'post-bar': request.POST.get('bar', None),
        'request-foo': request.REQUEST.get('foo', None),
        'request-bar': request.REQUEST.get('bar', None),
        'data': data,
    })


def view_with_argument(request, name):
    """A view that takes a string argument

    The purpose of this view is to check that if a space is provided in
    the argument, the test framework unescapes the %20 before passing
    the value to the view.
    """
    if name == 'Arthur Dent':
        return HttpResponse('Hi, Arthur')
    else:
        return HttpResponse('Howdy, %s' % name)


def login_protected_redirect_view(request):
    "A view that redirects all requests to the GET view"
    return HttpResponseRedirect('/test_client_regress/get_view/')
login_protected_redirect_view = login_required(login_protected_redirect_view)


def set_session_view(request):
    "A view that sets a session variable"
    request.session['session_var'] = 'YES'
    return HttpResponse('set_session')


def check_session_view(request):
    "A view that reads a session variable"
    return HttpResponse(request.session.get('session_var', 'NO'))


def request_methods_view(request):
    "A view that responds with the request method"
    return HttpResponse('request method: %s' % request.method)


def return_unicode(request):
    return render_to_response('unicode.html')


def return_undecodable_binary(request):
    return HttpResponse(
        b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
    )


def return_json_file(request):
    "A view that parses and returns a JSON string as a file."
    match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
    if match:
        charset = match.group(1)
    else:
        charset = settings.DEFAULT_CHARSET

    # This just checks that the uploaded data is JSON
    obj_dict = json.loads(request.body.decode(charset))
    obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder,
                          ensure_ascii=False)
    response = HttpResponse(obj_json.encode(charset), status=200,
                            content_type='application/json; charset=%s' % charset)
    response['Content-Disposition'] = 'attachment; filename=testfile.json'
    return response


def check_headers(request):
    "A view that responds with value of the X-ARG-CHECK header"
    return HttpResponse('HTTP_X_ARG_CHECK: %s' %
                        request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))


def body(request):
    "A view that is requested with GET and accesses request.body. Refs #14753."
    return HttpResponse(request.body)


def read_all(request):
    "A view that is requested with GET and accesses request.read()."
    return HttpResponse(request.read())


def read_buffer(request):
    "A view that is requested with GET and accesses request.read(LARGE_BUFFER)."
    return HttpResponse(request.read(99999))


def request_context_view(request):
    # Special attribute that won't be present on a plain HttpRequest
    request.special_path = request.path
    return render_to_response('request_context.html',
                              context_instance=RequestContext(request, {}))


# -*- coding: utf-8 -*-
#
# wiremock documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 17 14:39:01 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'WireMock'
copyright = u'2012, Tom Akehurst'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.57'
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_themes']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'yammerdoc'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# For yammerdoc theme
html_theme_options = {
    'tagline': u'A web service test double for all occasions',
    'gradient_start': u'#CCC',
    'gradient_end': u'#8CA8BB',
    'gradient_text': u'#333',
    'gradient_bg': u'#363F45',
    'github_page': u'https://github.com/tomakehurst/wiremock',
    'mailing_list': u'https://groups.google.com/forum/#!forum/wiremock-user'
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = 'WireMock'

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
    'googlefb1a0443a3c421ae': 'googlefb1a0443a3c421ae.html',
}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'wiremockdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'wiremock.tex', u'wiremock Documentation',
     u'Tom Akehurst', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'wiremock', u'wiremock Documentation',
     [u'Tom Akehurst'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'wiremock', u'WireMock Documentation',
     u'Tom Akehurst', 'WireMock', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'WireMock'
epub_author = u'Tom Akehurst'
epub_publisher = u'Tom Akehurst'
epub_copyright = u'2012, Tom Akehurst'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True


import distutils.core
import distutils.errors
import json
import os
import os.path
import platform
import re
import shutil
import sys
import tarfile
import tempfile
import warnings
import zipfile

try:
    import urllib2
except ImportError:
    from urllib import request as urllib2

from setuptools import setup

fzf_version = '0.20.0'
version = '0.5.' + fzf_version
release_url = ('https://api.github.com/repos/junegunn/fzf-bin/releases/tags/' +
               fzf_version)
asset_filename_re = re.compile(
    r'^fzf-(?P<ver>\d+\.\d+\.\d+)-'
    r'(?P<plat>[^-]+)_(?P<arch>[^.]+)'
    r'\.(?P<ext>tgz|tar\.gz|tar\.bz2|zip)$'
)
fzf_bin_path = os.path.join(os.path.dirname(__file__), 'iterfzf', 'fzf')
fzf_windows_bin_path = os.path.join(os.path.dirname(__file__),
                                    'iterfzf', 'fzf.exe')
urllib_retry = 3


def readme():
    path = os.path.join(os.path.dirname(__file__), 'README.rst')
    try:
        with open(path) as f:
            return f.read()
    except IOError:
        pass


def get_fzf_release(access_token=None):
    filename = 'fzf-{0}-release.json'.format(fzf_version)
    filepath = os.path.join(os.path.dirname(__file__), filename)
    try:
        with open(filepath) as f:
            d = f.read()
    except IOError:
        if access_token:
            request = urllib2.Request(
                release_url,
                headers={'Authorization': 'token ' + access_token},
            )
        else:
            request = release_url
        try:
            r = urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            if e.code == 403 and e.info().get('X-RateLimit-Remaining') == 0:
                raise RuntimeError(
                    'GitHub rate limit reached. To increase the limit use '
                    '-g/--github-access-token option.\n ' + str(e)
                )
            elif e.code == 401 and access_token:
                raise RuntimeError('Invalid GitHub access token.')
            raise
        d = r.read()
        r.close()
        mode = 'w' + ('b' if isinstance(d, bytes) else '')
        try:
            with open(filename, mode) as f:
                f.write(d)
        except IOError:
            pass
    try:
        return json.loads(d)
    except TypeError:
        return json.loads(d.decode('utf-8'))


def get_fzf_binary_url(plat, arch, access_token=None):
    release = get_fzf_release(access_token=access_token)
    for asset in release['assets']:
        m = asset_filename_re.match(asset['name'])
        if not m:
            warnings.warn('unmatched filename: ' + repr(asset['name']))
            continue
        elif m.group('ver') != fzf_version:
            warnings.warn('unmatched version: ' + repr(asset['name']))
            continue
        elif m.group('plat') == plat and m.group('arch') == arch:
            return asset['browser_download_url'], m.group('ext')


def extract(stream, ext, extract_to):
    with tempfile.NamedTemporaryFile() as tmp:
        shutil.copyfileobj(stream, tmp)
        tmp.flush()
        tmp.seek(0)
        if ext == 'zip':
            z = zipfile.ZipFile(tmp, 'r')
            try:
                info, = z.infolist()
                with open(extract_to, 'wb') as f:
                    f.write(z.read(info))
            finally:
                z.close()
        elif ext == 'tgz' or ext.startswith('tar.'):
            tar = tarfile.open(fileobj=tmp)
            try:
                member, = [m for m in tar.getmembers() if m.isfile()]
                rf = tar.extractfile(member)
                with open(extract_to, 'wb') as wf:
                    shutil.copyfileobj(rf, wf)
            finally:
                tar.close()
        else:
            raise ValueError('unsupported file format: ' + repr(ext))


def download_fzf_binary(plat, arch, overwrite=False, access_token=None):
    bin_path = fzf_windows_bin_path if plat == 'windows' else fzf_bin_path
    if overwrite or not os.path.isfile(bin_path):
        asset = get_fzf_binary_url(plat, arch, access_token)
        url, ext = asset
        if access_token:
            url = '{0}?access_token={1}'.format(url, access_token)
        try:
            r = urllib2.urlopen(url)
        except urllib2.HTTPError as e:
            if e.code == 403 and e.info().get('X-RateLimit-Remaining') == 0:
                raise RuntimeError(
                    'GitHub rate limit reached. To increase the limit use '
                    '-g/--github-access-token option.\n ' + str(e)
                )
            elif e.code == 401 and access_token:
                raise RuntimeError('Invalid GitHub access token.')
            raise
        extract(r, ext, bin_path)
        r.close()
    mode = os.stat(bin_path).st_mode
    if not (mode & 0o111):
        os.chmod(bin_path, mode | 0o111)


def get_current_plat_arch():
    archs = {
        'i686': '386',
        'i386': '386',
        'x86_64': 'amd64',
        'amd64': 'amd64',
    }
    machine = platform.machine()
    if not machine and sys.platform in ('win32', 'cygwin'):
        bits, linkage = platform.architecture()
        try:
            machine = {'32bit': 'i386', '64bit': 'amd64'}[bits]
        except KeyError:
            raise ValueError('unsupported architecture: ' +
                             repr((bits, linkage)))
    machine = machine.lower()
    if sys.platform.startswith('linux'):
        archs.update(
            armv5l='arm5', armv6l='arm6', armv7l='arm7', armv8l='arm8',
        )
    try:
        arch = archs[machine]
    except KeyError:
        raise ValueError('unsupported machine: ' + repr(machine))
    if sys.platform.startswith('linux'):
        return 'linux', arch
    elif sys.platform.startswith('freebsd'):
        return 'freebsd', arch
    elif sys.platform.startswith('openbsd'):
        return 'freebsd', arch
    elif sys.platform == 'darwin':
        return 'darwin', arch
    elif sys.platform in ('win32', 'cygwin'):
        return 'windows', arch
    else:
        raise ValueError('unsupported platform: ' + repr(sys.platform))


class bundle_fzf(distutils.core.Command):

    description = 'download and bundle a fzf binary'
    user_options = [
        ('plat=', 'p', 'platform e.g. windows, linux, freebsd, darwin'),
        ('arch=', 'a', 'architecture e.g. 386, amd64, arm8'),
        ('no-overwrite', 'O', 'do not overwrite if fzf binary exists'),
        (
            'github-access-token=', 'g',
            'GitHub API access token to increase the rate limit',
        ),
    ]
    boolean_options = ['no-overwrite']

    def initialize_options(self):
        try:
            self.plat, self.arch = get_current_plat_arch()
        except ValueError:
            self.plat = None
            self.arch = None
        self.no_overwrite = None
        self.github_access_token = None
        self.plat_name = None

    def finalize_options(self):
        if self.plat is None:
            raise distutils.errors.DistutilsOptionError(
                '-p/--plat option is required but missing'
            )
        if self.arch is None:
            raise distutils.errors.DistutilsOptionError(
                '-a/--arch option is required but missing'
            )
        try:
            self.plat_name = self.get_plat_name()
        except ValueError as e:
            raise distutils.errors.DistutilsOptionError(str(e))
        distutils.log.info('plat_name: %s', self.plat_name)

    def get_plat_name(self, plat=None, arch=None):
        plat = plat or self.plat
        arch = arch or self.arch
        if plat == 'linux':
            arch_tags = {
                '386': 'i686', 'amd64': 'x86_64',
                'arm5': 'armv5l', 'arm6': 'armv6l',
                'arm7': 'armv7l', 'arm8': 'armv8l',
            }
            try:
                arch_tag = arch_tags[arch]
            except KeyError:
                raise ValueError('unsupported arch: ' + repr(arch))
            return 'manylinux1_' + arch_tag
        elif plat in ('freebsd', 'openbsd'):
            arch_tags = {'386': 'i386', 'amd64': 'amd64'}
            try:
                arch_tag = arch_tags[arch]
            except KeyError:
                raise ValueError('unsupported arch: ' + repr(arch))
            return '{0}_{1}'.format(plat, arch_tag)
        elif plat == 'darwin':
            if arch == '386':
                archs = 'i386',
            elif arch == 'amd64':
                archs = 'intel', 'x86_64'
            else:
                raise ValueError('unsupported arch: ' + repr(arch))
            macs = 10, 11, 12
            return '.'.join('macosx_10_{0}_{1}'.format(mac, arch)
                            for mac in macs for arch in archs)
        elif plat == 'windows':
            if arch == '386':
                return 'win32'
            elif arch == 'amd64':
                return 'win_amd64'
            else:
                raise ValueError('unsupported arch: ' + repr(arch))
        else:
            raise ValueError('unsupported plat: ' + repr(plat))

    def run(self):
        dist = self.distribution
        try:
            bdist_wheel = dist.command_options['bdist_wheel']
        except KeyError:
            self.warn(
"{0} {1} bdist_wheel")'.format( dist.script_name, ' '.join(dist.script_args) ) ) else: typename = type(self).__name__ bdist_wheel.setdefault('universal', (typename, True)) plat_name = self.plat_name bdist_wheel.setdefault('plat_name', (typename, plat_name)) bdist_wheel_cls = dist.cmdclass['bdist_wheel'] get_tag_orig = bdist_wheel_cls.get_tag def get_tag(self): # monkeypatch bdist_wheel.get_tag() if self.plat_name_supplied and self.plat_name == plat_name: return get_tag_orig(self)[:2] + (plat_name,) return get_tag_orig(self) bdist_wheel_cls.get_tag = get_tag download_fzf_binary(self.plat, self.arch, overwrite=not self.no_overwrite, access_token=self.github_access_token) if dist.package_data is None: dist.package_data = {} dist.package_data.setdefault('iterfzf', []).append( 'fzf.exe' if self.plat == 'windows' else 'fzf' ) setup( name='iterfzf', version=version, description='Pythonic interface to fzf', long_description=readme(), url='https://github.com/dahlia/iterfzf', author='Hong Minhee', author_email='hong.minhee' '@' 'gmail.com', license='GPLv3 or later', packages=['iterfzf'], package_data={'iterfzf': ['py.typed']}, cmdclass={'bundle_fzf': bundle_fzf}, python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4', install_requires=['setuptools'], zip_safe=False, include_package_data=True, download_url='https://github.com/dahlia/iterfzf/releases', keywords='fzf', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console :: Curses', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', # noqa: E501 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: BSD :: FreeBSD', 'Operating System :: POSIX :: BSD :: OpenBSD', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Terminals', ] ) from datetime import timedelta, datetime from corehq.apps.users.models import CommCareUser from dimagi.utils.couch.database import get_db from dimagi.utils.parsing import json_format_datetime from string import Template class SuccessMessage(object): """ A helper for rendering the success message templates. #>>> SuccessMessage("Thanks $first_name! You have submitted $today forms today and $week forms since Monday.", userID).render() #u'Thanks Danny! You have submitted 2 forms today and 10 forms since Monday.' 
Valid strings are first_name, name, today, week, total """ def __init__(self, message, userID, domain=None, tz=timedelta(hours=0)): self.message = message self.userID = userID self.tz = tz if domain: self.domain = domain def render(self, quash=True): template = Template(self.message) try: return template.substitute(first_name=self.first_name, name=self.name, today=self.num_forms_today, week=self.num_forms_this_week, total=self.num_forms_all_time) except Exception: if not quash: raise return '' def check_message(self): Template(self.message).substitute(first_name='', name='', today='', week='', total='') @property def couch_user(self): if not hasattr(self, '_couch_user'): self._couch_user = CommCareUser.get_by_user_id(self.userID) return self._couch_user @property def first_name(self): try: return self.couch_user.first_name except Exception: return "(?)" @property def name(self): try: return "%s %s" % (self.couch_user.first_name, self.couch_user.last_name) except Exception: return "(?)" def get_num_forms_since(self, time): if not hasattr(self, 'domain'): self.domain = self.couch_user.domain if self.couch_user else None from corehq.apps.reports.util import make_form_couch_key key = make_form_couch_key(self.domain, user_id=self.userID) r = get_db().view('reports_forms/all_forms', startkey=key+[json_format_datetime(time)], endkey=key+[{}], group=False ).one() return r['value'] if r else 0 @property def num_forms_this_week(self): now = datetime.utcnow() + self.tz monday = now - timedelta(days=now.weekday()) then = datetime(monday.year, monday.month, monday.day) - self.tz return self.get_num_forms_since(then) @property def num_forms_today(self): now = datetime.utcnow() + self.tz then = datetime(now.year, now.month, now.day) - self.tz return self.get_num_forms_since(then) @property def num_forms_all_time(self): return self.get_num_forms_since(datetime(1970, 1, 1)) # Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import ddt import mock from rally.task.processing import plot from tests.unit import test PLOT = "rally.task.processing.plot."
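# The PLOT prefix above lets each test patch names where this module uses
# them: mock.patch(PLOT + "charts") swaps rally.task.processing.plot.charts
# for a MagicMock during a single test. A minimal sketch of the same pattern
# (the attribute names below are illustrative, not part of this suite):
#
#     with mock.patch(PLOT + "charts") as fake_charts:
#         fake_charts.MainStatsTable.return_value.render.return_value = "stub"
#         # code that calls plot.charts.MainStatsTable().render() gets "stub"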
@ddt.ddt class PlotTestCase(test.TestCase): @mock.patch(PLOT + "charts") def test__process_scenario(self, mock_charts): for mock_ins, ret in [ (mock_charts.MainStatsTable, "main_stats"), (mock_charts.MainStackedAreaChart, "main_stacked"), (mock_charts.AtomicStackedAreaChart, "atomic_stacked"), (mock_charts.OutputStackedAreaDeprecatedChart, "output_stacked"), (mock_charts.LoadProfileChart, "load_profile"), (mock_charts.MainHistogramChart, "main_histogram"), (mock_charts.AtomicHistogramChart, "atomic_histogram"), (mock_charts.AtomicAvgChart, "atomic_avg")]: setattr(mock_ins.return_value.render, "return_value", ret) iterations = [ {"timestamp": i + 2, "error": [], "duration": i + 5, "idle_duration": i, "output": {"additive": [], "complete": []}, "atomic_actions": {"foo_action": i + 10}} for i in range(10)] data = {"iterations": iterations, "sla": [], "key": {"kw": {"runner": {"type": "constant"}}, "name": "Foo.bar", "pos": 0}, "info": {"atomic": {"foo_action": {"max_duration": 19, "min_duration": 10}}, "full_duration": 40, "load_duration": 32, "iterations_count": 10, "iterations_passed": 10, "max_duration": 14, "min_duration": 5, "output_names": [], "tstamp_end": 25, "tstamp_start": 2}} task_data = plot._process_scenario(data, 1) self.assertEqual( task_data, { "cls": "Foo", "met": "bar", "name": "bar [2]", "pos": "1", "runner": "constant", "config": json.dumps( {"Foo.bar": [{"runner": {"type": "constant"}}]}, indent=2), "full_duration": 40, "load_duration": 32, "atomic": {"histogram": "atomic_histogram", "iter": "atomic_stacked", "pie": "atomic_avg"}, "iterations": {"histogram": "main_histogram", "iter": "main_stacked", "pie": [("success", 10), ("errors", 0)]}, "iterations_count": 10, "errors": [], "load_profile": "load_profile", "additive_output": [], "complete_output": [[], [], [], [], [], [], [], [], [], []], "output_errors": [], "sla": [], "sla_success": True, "table": "main_stats"}) @mock.patch(PLOT + "_process_scenario") @mock.patch(PLOT + "json.dumps", return_value="json_data") def test__process_tasks(self, mock_json_dumps, mock__process_scenario): tasks_results = [{"key": {"name": i, "kw": "kw_" + i}} for i in ("a", "b", "c", "b")] mock__process_scenario.side_effect = lambda a, b: ( {"cls": "%s_cls" % a["key"]["name"], "name": str(b), "met": "dummy", "pos": str(b)}) source, tasks = plot._process_tasks(tasks_results) self.assertEqual(source, "json_data") mock_json_dumps.assert_called_once_with( {"a": ["kw_a"], "b": ["kw_b", "kw_b"], "c": ["kw_c"]}, sort_keys=True, indent=2) self.assertEqual( tasks, [{"cls": "a_cls", "met": "dummy", "name": "0", "pos": "0"}, {"cls": "b_cls", "met": "dummy", "name": "0", "pos": "0"}, {"cls": "b_cls", "met": "dummy", "name": "1", "pos": "1"}, {"cls": "c_cls", "met": "dummy", "name": "0", "pos": "0"}]) @ddt.data({}, {"include_libs": True}, {"include_libs": False}) @ddt.unpack @mock.patch(PLOT + "_process_tasks") @mock.patch(PLOT + "objects") @mock.patch(PLOT + "ui_utils.get_template") @mock.patch(PLOT + "json.dumps", side_effect=lambda s: "json_" + s) def test_plot(self, mock_dumps, mock_get_template, mock_objects, mock__process_tasks, **ddt_kwargs): mock__process_tasks.return_value = "source", "scenarios" mock_get_template.return_value.render.return_value = "tasks_html" mock_objects.Task.extend_results.return_value = ["extended_result"] tasks_results = [ {"key": "foo_key", "sla": "foo_sla", "result": "foo_result", "full_duration": "foo_full_duration", "load_duration": "foo_load_duration"}] html = plot.plot(tasks_results, **ddt_kwargs) 
self.assertEqual(html, "tasks_html") generic_results = [ {"id": None, "created_at": None, "updated_at": None, "task_uuid": None, "key": "foo_key", "data": {"raw": "foo_result", "full_duration": "foo_full_duration", "sla": "foo_sla", "load_duration": "foo_load_duration"}}] mock_objects.Task.extend_results.assert_called_once_with( generic_results) mock_get_template.assert_called_once_with("task/report.html") mock__process_tasks.assert_called_once_with(["extended_result"]) if "include_libs" in ddt_kwargs: mock_get_template.return_value.render.assert_called_once_with( data="json_scenarios", source="json_source", include_libs=ddt_kwargs["include_libs"]) else: mock_get_template.return_value.render.assert_called_once_with( data="json_scenarios", source="json_source", include_libs=False) #!/usr/bin/python import cStringIO as StringIO from fnmatch import fnmatch import difflib import os import sys def get_name(filename): return os.path.splitext(filename)[0] def list_dir(dir_path, filter_func): return sorted(filter(filter_func, os.listdir(dir_path)), key=get_name) def main(): test_dir = os.path.dirname(os.path.realpath(__file__)) testcase_dir = os.path.join(test_dir, 'testcases') testcase_file = os.path.join(test_dir, 'testcases.js') def is_testcase_file(filename): return ( fnmatch(filename, "*.html") and not fnmatch(filename, "manual-test*") and not fnmatch(filename, "disabled-*")) new_testcases = StringIO.StringIO() new_testcases.write("""\ // This file is automatically generated by test/update-testcases.py. // Disable tests by adding them to test/disabled-testcases """) new_testcases.write('var tests = [\n \'') new_testcases.write( '\',\n \''.join(list_dir(testcase_dir, is_testcase_file))) new_testcases.write('\',\n];\n') new_testcases.seek(0) new_testcases_lines = new_testcases.readlines() current_testcases_lines = file(testcase_file).readlines() lines = list(difflib.unified_diff( current_testcases_lines, new_testcases_lines, fromfile=testcase_file, tofile=testcase_file)) if len(lines) == 0: sys.stdout.write('Nothing to do\n') sys.exit(0) if not "--dry-run" in sys.argv: file(testcase_file, "w").write("".join(new_testcases_lines)) sys.stdout.write( 'Updating %s with the following diff.\n' % testcase_file) for line in lines: sys.stdout.write(line) sys.exit(1) if __name__ == '__main__': main() from scrapy.spider import BaseSpider from scrapy.selector import HtmlXPathSelector from scrapy.http import Request, HtmlResponse from scrapy.utils.url import urljoin_rfc from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader class OutdoorWorldSpider(BaseSpider): name = 'outdoorworld.co.uk' allowed_domains = ['outdoorworld.co.uk'] start_urls = ['http://www.outdoorworld.co.uk/tents/tents-by-size?limit=all'] def parse(self, response): hxs = HtmlXPathSelector(response) products = hxs.select('//div[@class="category-products"]/ul/li') for product in products: loader = ProductLoader(item=Product(), selector=product) loader.add_xpath('url', 'a/@href') loader.add_xpath('name', 'h2/a/@title') xpath = ('div[@class="price-box"]/' 'p[@class="special-price"]/span[@class="price"]/text()') if product.select(xpath): loader.add_xpath('price', xpath) else: xpath = ('div[@class="price-box"]/' 'p[@class="minimal-price"]/span[@class="price"]/text()') if product.select(xpath): loader.add_xpath('price', xpath) else: xpath = ('div[@class="price-box"]/' 'span[@class="regular-price"]/' 'span[@class="price"]/text()') if product.select(xpath): loader.add_xpath('price', xpath) yield loader.load_item() # 
forwards SMS messages to an MQTT broker # SIM module on ttyS1 # controlled via AT commands # SMS format topic:payload # call initpin() once at startup import serial import time import paho.mqtt.client as mqtt port=serial.Serial("/dev/ttyS1",9600) #port.open() client=mqtt.Client() # MQTT server client.connect("x.x.x.x",1883,60) def initpin(): # SIM PIN port.write("AT+CPIN=xxxx\r\n") out='' time.sleep(0.5) while port.inWaiting()>0: out+=port.read(1) print out def readmsg1(): port.write("AT+CMGR=1\r\n") out='' time.sleep(0.5) while port.inWaiting()>0: out+=port.read(1) if len(out)<7: print "no message." else: print out nrstart=out.find('"+') nrend=out.find('"',nrstart+1) nr=out[nrstart+1:nrend] endline=out.find('\n',2) mess=out[endline+1:] endmess=mess.find('\n')-1 mess=mess[:endmess] # allowed sender number if nr != "xxxxxxxxxxxxxx": print "invalid number." else: print "signal received." # print "Message:"+mess endtopic=mess.find(':') topic=mess[:endtopic] payload=mess[endtopic+1:] # print "Topic:"+topic # print "Payload:"+payload client.publish(topic,payload) port.write('AT+CMGD=1\r\n') while(1): readmsg1() time.sleep(10) port.close() doctests = """ >>> k = "old value" >>> { k: None for k in range(10) } {0: None, 1: None, 2: None, 3: None, 4: None, 5: None, 6: None, 7: None, 8: None, 9: None} >>> k 'old value' >>> { k: k+10 for k in range(10) } {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17, 8: 18, 9: 19} >>> g = "Global variable" >>> { k: g for k in range(10) } {0: 'Global variable', 1: 'Global variable', 2: 'Global variable', 3: 'Global variable', 4: 'Global variable', 5: 'Global variable', 6: 'Global variable', 7: 'Global variable', 8: 'Global variable', 9: 'Global variable'} >>> { k: v for k in range(10) for v in range(10) if k == v } {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9} >>> { k: v for v in range(10) for k in range(v*9, v*10) } {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4, 38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6, 55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7, 66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8, 76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9, 85: 9, 86: 9, 87: 9, 88: 9, 89: 9} >>> { x: y for y, x in ((1, 2), (3, 4)) } = 5 # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... SyntaxError: ... >>> { x: y for y, x in ((1, 2), (3, 4)) } += 5 # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... SyntaxError: ... """ __test__ = {'doctests' : doctests} def test_main(verbose=None): import sys from test import support from test import test_dictcomps support.run_doctest(test_dictcomps, verbose) # verify reference counting if verbose and hasattr(sys, "gettotalrefcount"): import gc counts = [None] * 5 for i in range(len(counts)): support.run_doctest(test_dictcomps, verbose) gc.collect() counts[i] = sys.gettotalrefcount() print(counts) if __name__ == "__main__": test_main(verbose=True) ## # Copyright 2009-2020 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
# # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for building and installing toy extensions, implemented as an easyblock @author: Kenneth Hoste (Ghent University) """ from easybuild.framework.easyconfig import CUSTOM from easybuild.framework.extensioneasyblock import ExtensionEasyBlock from easybuild.easyblocks.toy import EB_toy from easybuild.tools.run import run_cmd class Toy_Extension(ExtensionEasyBlock): """Support for building/installing toy.""" @staticmethod def extra_options(): """Custom easyconfig parameters for toy extensions.""" extra_vars = { 'toy_ext_param': ['', "Toy extension parameter", CUSTOM], } return ExtensionEasyBlock.extra_options(extra_vars=extra_vars) def run(self): """Build toy extension.""" if self.src: super(Toy_Extension, self).run(unpack_src=True) EB_toy.configure_step(self.master, name=self.name) EB_toy.build_step(self.master, name=self.name, buildopts=self.cfg['buildopts']) if self.cfg['toy_ext_param']: run_cmd(self.cfg['toy_ext_param']) EB_toy.install_step(self.master, name=self.name) return self.module_generator.set_environment('TOY_EXT_%s' % self.name.upper(), self.name) def sanity_check_step(self, *args, **kwargs): """Custom sanity check for toy extensions.""" self.log.info("Loaded modules: %s", self.modules_tool.list()) custom_paths = { 'files': [], 'dirs': ['.'], # minor hack to make sure there's always a non-empty list } if self.src: custom_paths['files'].extend(['bin/%s' % self.name, 'lib/lib%s.a' % self.name]) return super(Toy_Extension, self).sanity_check_step(custom_paths=custom_paths) #!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = """ --- module: ios_command version_added: "2.1" author: "Peter Sprygada (@privateip)" short_description: Run commands on remote devices running Cisco IOS description: - Sends arbitrary commands to an ios node and returns the results read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. - This module does not support running commands in configuration mode. Please use M(ios_config) to configure IOS devices. extends_documentation_fragment: ios options: commands: description: - List of commands to send to the remote ios device over the configured provider.
The resulting output from the command is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or the number of retries has expired. required: true wait_for: description: - List of conditions to evaluate against the output of the command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of retries, the task fails. See examples. required: false default: null aliases: ['waitfor'] version_added: "2.2" match: description: - The I(match) argument is used in conjunction with the I(wait_for) argument to specify the match policy. Valid values are C(all) or C(any). If the value is set to C(all) then all conditionals in the wait_for must be satisfied. If the value is set to C(any) then only one of the values must be satisfied. required: false default: all choices: ['any', 'all'] version_added: "2.2" retries: description: - Specifies the number of retries a command should be tried before it is considered failed. The command is run on the target device every retry and evaluated against the I(wait_for) conditions. required: false default: 10 interval: description: - Configures the interval in seconds to wait between retries of the command. If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again. required: false default: 1 """ EXAMPLES = """ tasks: - name: run show version on remote devices ios_command: commands: show version - name: run show version and check to see if output contains IOS ios_command: commands: show version wait_for: result[0] contains IOS - name: run multiple commands on remote nodes ios_command: commands: - show version - show interfaces - name: run multiple commands and evaluate the output ios_command: commands: - show version - show interfaces wait_for: - result[0] contains IOS - result[1] contains Loopback0 """ RETURN = """ stdout: description: The set of responses from the commands returned: always apart from low level errors (such as action plugin) type: list sample: ['...', '...'] stdout_lines: description: The value of stdout split into a list returned: always apart from low level errors (such as action plugin) type: list sample: [['...', '...'], ['...'], ['...']] failed_conditions: description: The list of conditionals that have failed returned: failed type: list sample: ['...', '...'] """ import time from ansible.module_utils.ios import run_commands from ansible.module_utils.ios import ios_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network_common import ComplexList from ansible.module_utils.netcli import Conditional from ansible.module_utils.six import string_types def to_lines(stdout): for item in stdout: if isinstance(item, string_types): item = str(item).split('\n') yield item def parse_commands(module, warnings): command = ComplexList(dict( command=dict(key=True), prompt=dict(), answer=dict() ), module) commands = command(module.params['commands']) for index, item in enumerate(commands): if module.check_mode and not item['command'].startswith('show'): warnings.append( 'only show commands are supported when using check mode, not ' 'executing `%s`' % item['command'] ) elif item['command'].startswith('conf'): module.fail_json( msg='ios_command does not support running config mode ' 'commands.
Please use ios_config instead' ) return commands def main(): """main entry point for module execution """ argument_spec = dict( commands=dict(type='list', required=True), wait_for=dict(type='list', aliases=['waitfor']), match=dict(default='all', choices=['all', 'any']), retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) argument_spec.update(ios_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) result = {'changed': False} warnings = list() check_args(module, warnings) commands = parse_commands(module, warnings) result['warnings'] = warnings wait_for = module.params['wait_for'] or list() conditionals = [Conditional(c) for c in wait_for] retries = module.params['retries'] interval = module.params['interval'] match = module.params['match'] while retries > 0: responses = run_commands(module, commands) for item in list(conditionals): if item(responses): if match == 'any': conditionals = list() break conditionals.remove(item) if not conditionals: break time.sleep(interval) retries -= 1 if conditionals: failed_conditions = [item.raw for item in conditionals] msg = 'One or more conditional statements have not been satisfied' module.fail_json(msg=msg, failed_conditions=failed_conditions) result.update({ 'changed': False, 'stdout': responses, 'stdout_lines': list(to_lines(responses)) }) module.exit_json(**result) if __name__ == '__main__': main() #!/usr/bin/env python3 """ This file is part of BOMtools. BOMtools is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. BOMTools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with BOMTools. If not, see <http://www.gnu.org/licenses/>. """ __author__ = 'srodgers' import argparse import configparser import os import sys from bommdb import * defaultMpn = 'N/A' defaultDb= '/etc/bommgr/parts.db' defaultConfigLocations = ['/etc/bommgr/bommgr.conf','~/.bommgr/bommgr.conf','bommgr.conf'] firstPn = '800000-101' defaultMID='M0000000' # Yes/no prompt def query_yes_no(question, default="yes"): """Ask a yes/no question via input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no".
""" valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("invalid default answer: '%s'" % default) while True: sys.stdout.write(question + prompt) choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") # List part numbers, descriptions, manufacturers, manufacturer part numbers def listParts(like=None): global defaultMpn, defaultMfgr global DB res = DB.get_parts(like) print('{0:<20} {1:<50} {2:<30} {3:<20}'.format("Part Num","Title/Description","Manufacturer","MPN")) for (pn,desc) in res: # Try to retrieve manufacturer info minfo = DB.lookup_mpn_by_pn(pn) if minfo == []: # Use defaults if it no MPN and manufacturer minfo =[{'mname': defaultMfgr, 'mpn': defaultMpn}] for i,item in enumerate(minfo): if i > 0: pn = '' desc = '' print('{0:<20} {1:<50} {2:<30} {3:<20}'.format(pn,desc,minfo[i]['mname'],minfo[i]['mpn'])) # List manufacturers def listMfgrs(): global DB print('{0:<30}'.format("Manufacturer")) minfo = DB.get_mfgrs() if(minfo is not None): for mfgr in minfo: print('{0:<30}'.format(mfgr[0])) # Add a new manufacturer to the manufacturer's list def addMfgr(new_mfgr): global DB # Does it already exist? minfo = DB.lookup_mfg(new_mfgr) if(minfo is not None): print('Error: Manufacturer "{}" is already in the manufacturer\'s list'.format(new_mfgr)) raise(ValueError) # Get the last used ID and generate the next ID to be used mid = DB.last_mid() if mid is not None: mid = int(mid[1:]) + 1 else: mid = 0 nextid = 'M{num:07d}'.format(num=mid) DB.add_mfg_to_mlist(new_mfgr, nextid) print("Manufacturer {} added".format(new_mfgr)) # Validate a part number to ensure it is in the correct 6-3 format def validatePN(pn): try: (prefix, suffix) = pn.split('-') except ValueError: print('Error: Bad part number format, needs to be XXXXXX-YYY') raise(ValueError) if len(prefix) != 6 or len(suffix)!= 3: print('Error: Bad part number format, needs to be XXXXXX-YYY') raise(ValueError) # Return the next available part number def nextPN(): global DB # Get the last used part number and generate the next ID to be used res = DB.last_pn() # If this is the very first part number added use the default for firstpn if res is None or res[0] is None: pn = firstPn else: pn = res (prefix, suffix) = pn.split('-') nextnum = int(prefix) + 1 pn = '{prefix:06d}-{suffix:03d}'.format(prefix=nextnum, suffix=101) return pn # Add a new part to the database def newPart(desc, newpn = None, mfg='', mpn=''): global DB pinfo = None if(len(desc) == 0 or len(desc) > 50): print("Error: Description must be between 1 and 50 characters") sys.exit(2) # Define the next part number to be used if(newpn is not None): # User defined part number, need to validate it pinfo = DB.lookup_pn(newpn) if(pinfo is not None): print('Error: Part number {} already exists'.format(newpn)) sys.exit(2) validatePN(newpn) pn = newpn else: pn = nextPN() # Avoid duplicate part number assignment if mpn is not N/A if mpn is not None: minfo = DB.lookup_mpn(mpn) # This is a check to see if the MPM exists anywhere else if mpn != defaultMpn and minfo is not None and minfo[1] == mfg: print("Error: MPN already exists with same manufacturer under part number {}".format(minfo[0])) sys.exit(2) # Check to see if the manufacturer exists minfo = DB.lookup_mfg(mfg) if minfo is 
None: # Manufacturer doesn't exist, create it addMfgr(mfg) # Get its ID minfo = DB.lookup_mfg(mfg) mname = minfo[0] mid = minfo[1] # We now have a valid pn, desc, mname, mpn, and mid. Insert the pn and description in the pndesc table, # and insert the pn, mid, and mpn in the pnmpn table DB.add_pn(pn, desc, mid, mpn) return pn # Query by MPN and print results if the MPN exists def queryMPN(mpn): global DB res = DB.lookup_mpn(mpn) if(res is None): print("MPN does not exist") return pn = res[0] mname = res[1] mpn = res[2] res = DB.lookup_pn(pn) desc = res[1] print('{0:<20} {1:<50} {2:<30} {3:<20}'.format("Part Num","Title/Description","Manufacturer","MPN")) print('{0:<20} {1:<50} {2:<30} {3:<20}'.format(pn, desc ,mname , mpn)) # Query by PN and print results if the PN exists # prints multiple lines if there are multiple MPNs mapped to a PN def queryPN(pn): global defaultMpn, defaultMfgr global DB res = DB.lookup_pn(pn) if(res is None): print("Part number does not exist") return pn = res[0] desc = res[1] print('{0:<20} {1:<50} {2:<30} {3:<20}'.format("Part Num","Title/Description","Manufacturer","MPN")) res = DB.lookup_mpn_by_pn(pn) if(len(res) != 0): for item in res: print('{0:<20} {1:<50} {2:<30} {3:<20}'.format(item['pn'], desc ,item['mname'], item['mpn'])) else: print('{0:<20} {1:<50} {2:<30} {3:<20}'.format(pn, desc, defaultMfgr, defaultMpn)) # Modify mpn def modifyMPN(partnumber, curmpn, newmpn): global DB res = DB.lookup_mfg_by_pn_mpn(partnumber, curmpn) if res is None: print('Error: Can\'t get current MPN record') raise SystemError mid = res[1] DB.update_mpn(partnumber, curmpn, newmpn, mid) # Modify manufacturer name for a given part number and MPN def modifyMFG(partnumber, curpn, curmpn, newmfgid): global DB res = DB.lookup_mfg_by_pn_mpn(curpn, curmpn) if res is None: print('Error: Unknown MPN {}'.format(curmpn)) raise SystemError # Extract old mid oldmfgid = res[1] # Check to see that the new mid exists res = DB.lookup_mfg_by_id(newmfgid) if res is None: print('Error: Unknown manufacturer ID {}'.format(newmfgid)) raise SystemError # Update the manufacturer part record DB.update_mid(partnumber, curmpn, oldmfgid, newmfgid) if __name__ == '__main__': conn = None cur = None parser = argparse.ArgumentParser(description = 'BOM Manager Utility', prog = 'bommgr.py') parser.add_argument('--specdb', help='Specify database file path') parser.add_argument('--config', help='Specify config file path', default=None) subparsers = parser.add_subparsers(dest = 'operation', help='Run bommgr.py {command} -h for additional help') parser_nextpn = subparsers.add_parser('nextpn', help='Get next unassigned part number') # List sub sub-parser parser_list = subparsers.add_parser('list', help='List items') parser_list_subparser = parser_list.add_subparsers(dest='listwhat', help='List parts or manufacturers') parser_list_pn = parser_list_subparser.add_parser('parts', help='List part numbers') parser_list_pn.add_argument('--like', help="Return like matches only") parser_list_mpn = parser_list_subparser.add_parser('mfg', help='List manufacturers') # Query sub-subparser parser_query = subparsers.add_parser('query', help='Query something') parser_query_subparser = parser_query.add_subparsers(dest='querywhat', help='Query a part or MPN') parser_query_pn = parser_query_subparser.add_parser('pn', help='Query part number') parser_query_pn.add_argument('partnumber', help='Part Number') parser_query_mpn = parser_query_subparser.add_parser('mpn', help='Query manufacturer\'s part number')
parser_query_mpn.add_argument('mpartnumber', help='Part Number') # Add sub-subparser parser_add = subparsers.add_parser('add', help='Add new part') parser_add_subparser = parser_add.add_subparsers(dest='addwhat', help='Add a part or MPN') # Add part parser_add_part = parser_add_subparser.add_parser('part',help='Add part') parser_add_part.add_argument('title', help='Title (Part Description)') # title is mandatory for add part parser_add_part.add_argument('--mpn', dest="mpn", help="Manufacturer's part number") parser_add_part.add_argument('--mfg', dest="manufacturer", help="Manufacturer name") parser_add_part.add_argument('--specpn', help="Specify PN") # Add mpn parser_add_mpn = parser_add_subparser.add_parser('altmpn',help='Add alternate MPN to existing part') parser_add_mpn.add_argument('part', help='Part number') # part number is mandatory for add mpn parser_add_mpn.add_argument('mpn', help='Manufacturer Part number') # part number is mandatory for add mpn parser_add_mpn.add_argument('manufacturer', help='Manufacturer Name') # Manufacturer name is mandatory parser_add_mpn.add_argument('--forcenewmfg', action='store_true', help="Force add of new manufacturer from name given") # Modify sub-subparser parser_modify = subparsers.add_parser('modify', help='Modify a title, or manufacturer\'s part number (MPN)') parser_modify_title_subparser = parser_modify.add_subparsers(dest='modifywhat', help='Modify a title') # Modify title parser_modify_title = parser_modify_title_subparser.add_parser('title',help='New title/description to use') parser_modify_title.add_argument('partnumber', help='Part number to look up') parser_modify_title.add_argument('title', help='New title to use') # Modify MPN parser_modify_mpn = parser_modify_title_subparser.add_parser('mpn',help='New manufacturer\'s part number to use') parser_modify_mpn.add_argument('partnumber', help='Part number to look up') parser_modify_mpn.add_argument('curmpn', help='Current MPN') parser_modify_mpn.add_argument('newmpn', help='New MPN') # Modify MFG parser_modify_mfg = parser_modify_title_subparser.add_parser('mfg',help='New manufacturer to use') parser_modify_mfg.add_argument('partnumber', help='Part number to look up') parser_modify_mfg.add_argument('curmpn', help='Current MPN') parser_modify_mfg.add_argument('manufacturer', help='New Manufacturer') parser_modify_mfg.add_argument('--forcenewmfg', action='store_true', help="Force add of new manufacturer from name given") # Modify manufacturer in manufacturer's list parser_modify_mlistmfg = parser_modify_title_subparser.add_parser('mlistmfg',help='Modify manufacturer name in manufacturer\'s list') parser_modify_mlistmfg.add_argument('curmfg', help='Current Manufacturer') parser_modify_mlistmfg.add_argument('newmfg', help='New Manufacturer') ## Parser code end ## Customize default configurations to user's home directory for i in range(0, len(defaultConfigLocations)): defaultConfigLocations[i] = os.path.expanduser(defaultConfigLocations[i]) # parse the args and die on error args = parser.parse_args() # Read the config file, if any config = configparser.ConfigParser() if(args.config is not None): configLocation = os.path.expanduser(args.config) else: configLocation = defaultConfigLocations config.read(configLocation) try: general = config['general'] except KeyError: print('Warning: no config file found') general = None # Open the database file # If database specified in args, override default and config path if args.specdb is not None: db = os.path.expanduser(args.specdb) else: if general is not 
None: db = os.path.expanduser(general.get('db', defaultDb)) print(db) else: db = defaultDb # Check to see if we can access the database file and that it is writable if(os.path.isfile(db) == False): print('Error: Database file {} doesn\'t exist'.format(db)) raise(SystemError) if(os.access(db,os.W_OK) == False): print('Error: Database file {} is not writable'.format(db)) raise(SystemError) DB = BOMdb(db) print() print("Info: Database used: {}".format(os.path.abspath(db))) print() # Look up default manufacturer res = DB.lookup_mfg_by_id(defaultMID) if(res is None): defaultMfgr = 'Default MFG Error' else: defaultMfgr = res[0] # if nextpn, print the next available part number if args.operation is None: print('Error: no operation specified') sys.exit(2) if args.operation == 'nextpn': print(nextPN()) sys.exit(0) if args.operation == 'list': if args.listwhat == 'mfg': listMfgrs() elif args.listwhat == 'parts': listParts(args.like) else: print('Error: unknown list option {}'.format(args.listwhat)) sys.exit(2) sys.exit(0) # Query by pn or mpn if args.operation == 'query' : if args.querywhat == 'pn': queryPN(args.partnumber) elif args.querywhat == 'mpn': queryMPN(args.mpartnumber) else: print('Error: unknown query option {}'.format(args.querywhat)) sys.exit(2) sys.exit(0) # Add a part number or manufacturer if args.operation == 'add': if args.addwhat == 'part': title = args.title if args.specpn: pn = args.specpn else: pn = nextPN() mname = defaultMfgr if args.manufacturer: mname = args.manufacturer mpn = defaultMpn if args.mpn: mpn = args.mpn if True: print('About to add:') print() print("MPN : {}".format(mpn)) print("Manufacturer : {}".format(mname)) print() print("as {}, {}".format(pn, title)) print() if query_yes_no('Add new part?','no') is False: sys.exit(0) pn = newPart(title, pn, mname, mpn) print('New part number added: {}'.format(pn)) elif args.addwhat == 'altmpn': pn = args.part mpn = args.mpn mname = args.manufacturer res = DB.lookup_pn(pn) # Sanity checks if res is None : print('Error: no such part number {}'.format(pn)) sys.exit(2) desc = res[1] res = DB.lookup_mpn(mpn) if res is not None: print('Error: MPN {} is already in the database'.format(mpn)) sys.exit(2) minfo = DB.lookup_mfg(mname) if args.forcenewmfg is False and minfo is None: print('Error: Manufacturer {} is not in the database. 
Add with --forcenewmfg'.format(mname)) sys.exit(2) if True: print('About to add:') print() print("MPN : {}".format(mpn)) print("Manufacturer : {}".format(mname)) print() print("to {}, {}".format(pn, desc)) print() if query_yes_no('Add alternate mpn?','no') is False: sys.exit(0) if(minfo is None): # Add new manufacturer if it doesn't exist addMfgr(mname) minfo = DB.lookup_mfg(mname) mid = minfo[1] DB.add_mpn(pn, mid, mpn) print("Alternate MPN added") else: print('Unrecognized addwhat option') sys.exit(2) # Modify a title or an MPN if args.operation == 'modify': partnumber = '' if args.modifywhat in ['title', 'mpn', 'mfg']: partnumber = args.partnumber res = DB.lookup_pn(partnumber) if(res is None): print('Error: no such part number {}'.format(partnumber)) sys.exit(2) # Modify title if args.modifywhat == 'title': DB.update_title(partnumber, args.title) # Modify mpn elif args.modifywhat == 'mpn' : curmpn = args.curmpn newmpn = args.newmpn res = DB.lookup_mfg_by_pn_mpn(partnumber, curmpn) if(res is None): print('Error: no such manufacturer part number {}'.format(curmpn)) sys.exit(2) modifyMPN(partnumber, curmpn, newmpn) # Modify manufacturer elif args.modifywhat == 'mfg': curpn = args.partnumber curmpn = args.curmpn mfgr = args.manufacturer res = DB.lookup_mfg_by_pn_mpn(curpn, curmpn) if(res is None): print('Error: no such manufacturer part number {}'.format(curmpn)) sys.exit(2) # See if new mfgr already exists res = DB.lookup_mfg(mfgr) if res is None: if args.forcenewmfg: # Create new manufacturer addMfgr(mfgr) # Get the newly assigned mid res = DB.lookup_mfg(mfgr) else: print('Error: New manufacturer {} not in database. Add with --forcenewmfg'.format(mfgr)) sys.exit(2) newmid = res[1] modifyMFG(partnumber, curpn, curmpn, newmid) # Modify manufacturer name in manufacturer's list elif args.modifywhat == 'mlistmfg': curmfg = args.curmfg newmfg = args.newmfg res = DB.lookup_mfg(curmfg) if(res is None): print('Error: Manufacturer not in database') sys.exit(2) mid = res[1] if(DB.lookup_mfg(newmfg) is not None): print('Error: New Manufacturer already in database') sys.exit(2) DB.update_mfg(mid, newmfg) else: print('Error: unrecognized modifywhat option') sys.exit(2) """ .. _tut_evoked_objects: The :class:`Evoked <mne.Evoked>` data structure: evoked/averaged data ===================================================================== The :class:`Evoked <mne.Evoked>` data structure is mainly used for storing averaged data over trials. In MNE the evoked objects are usually created by averaging epochs data with :func:`mne.Epochs.average`. """ import os.path as op import mne ############################################################################### # Here for convenience we read the evoked dataset from a file. data_path = mne.datasets.sample.data_path() fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True) print(evokeds) ############################################################################### # Notice that the reader function returned a list of evoked instances. This is # because you can store multiple categories into a single file. Here we have # categories of # ``['Left Auditory', 'Right Auditory', 'Left Visual', 'Right Visual']``. # We can also use the ``condition`` parameter to read in only one category.
evoked = mne.read_evokeds(fname, condition='Left Auditory') evoked.apply_baseline((None, 0)).apply_proj() print(evoked) ############################################################################### # If you've gone through the tutorials of raw and epochs datasets, you're # probably already familiar with the :class:`Info <mne.Info>` attribute. # There is nothing new or special about ``evoked.info``. All the relevant # info is still there. print(evoked.info) print(evoked.times) ############################################################################### # The evoked data structure also contains some new attributes easily # accessible: print(evoked.nave) # Number of averaged epochs. print(evoked.first) # First time sample. print(evoked.last) # Last time sample. print(evoked.comment) # Comment on dataset. Usually the condition. print(evoked.kind) # Type of data, either average or standard_error. ############################################################################### # The data is also easily accessible. Since the evoked data arrays are usually # much smaller than raw or epochs datasets, they are preloaded into memory # when the evoked object is constructed. You can access the data as a numpy # array. data = evoked.data print(data.shape) ############################################################################### # The data is arranged in an array of shape `(n_channels, n_times)`. Notice # that unlike epochs, the evoked object does not support indexing. This means # that to access the data of a specific channel you must use the data array # directly. print('Data from channel {0}:'.format(evoked.ch_names[10])) print(data[10]) ############################################################################### # If you want to import evoked data from some other system and you have it in a # numpy array you can use :class:`mne.EvokedArray` for that. All you need is # the data and some info about the evoked data. For more information, see # :ref:`tut_creating_data_structures`. evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0]) evoked.plot() ############################################################################### # To write an evoked dataset to a file, use the :meth:`mne.Evoked.save` method. # To save multiple categories to a single file, see :func:`mne.write_evokeds`.
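###############################################################################
# A minimal sketch of both write paths (the output file names below are
# hypothetical; evoked file names conventionally end in ``-ave.fif``):

evoked.save('sample_audvis-new-ave.fif')  # one category to one file
mne.write_evokeds('sample_audvis-all-ave.fif', evokeds)  # several categories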
# coding: utf-8 from __future__ import unicode_literals import pytest from boxsdk.client import Client from boxsdk.exception import BoxAPIException def test_create_folder_then_update_info(created_subfolder): # pylint:disable=redefined-outer-name _test_create_then_update_info(created_subfolder) def test_create_file_then_update_info(uploaded_file): # pylint:disable=redefined-outer-name _test_create_then_update_info(uploaded_file) def _test_create_then_update_info(item): updated_name = 'updated_{0}'.format(item.name) updated_item = item.update_info({'name': updated_name}) assert updated_item.name == updated_name assert item.get().name == updated_name def test_create_folder_then_rename(created_subfolder): # pylint:disable=redefined-outer-name _test_create_then_rename(created_subfolder) def test_create_file_then_rename(uploaded_file): # pylint:disable=redefined-outer-name _test_create_then_rename(uploaded_file) def _test_create_then_rename(item): updated_name = 'updated_{0}'.format(item.name) updated_item = item.rename(updated_name) assert updated_item.name == updated_name assert item.get().name == updated_name def test_create_folder_then_move(box_client, created_subfolder): # pylint:disable=redefined-outer-name _test_create_then_move(box_client, created_subfolder) def test_create_file_then_move(box_client, uploaded_file): # pylint:disable=redefined-outer-name _test_create_then_move(box_client, uploaded_file) def _test_create_then_move(box_client, item): item_name = item.name move_target = box_client.folder('0').create_subfolder('move target') item.move(move_target) item = item.get() assert item.name == item_name assert item.parent['id'] == move_target.object_id assert len(box_client.folder('0').get_items(10)) == 1 assert len(move_target.get_items(10)) == 1 def test_create_folder_then_copy(box_client, created_subfolder): # pylint:disable=redefined-outer-name _test_create_then_copy(box_client, created_subfolder) def test_create_file_then_copy(box_client, uploaded_file): # pylint:disable=redefined-outer-name _test_create_then_copy(box_client, uploaded_file) def _test_create_then_copy(box_client, item): # pylint:disable=redefined-outer-name copy_target = box_client.folder('0').create_subfolder('copy target') copied_item = item.copy(copy_target) item = item.get() copied_item = copied_item.get() assert item.id != copied_item.id assert item.name == copied_item.name assert copied_item.parent['id'] == copy_target.object_id assert len(box_client.folder('0').get_items(10)) == 2 assert len(copy_target.get_items(10)) == 1 @pytest.mark.parametrize('constructor', [Client.file, Client.folder]) def test_get_item_info_for_missing_item(box_client, constructor): with pytest.raises(BoxAPIException) as exc_info: constructor(box_client, '1').get() assert exc_info.value.status == 404 @pytest.mark.parametrize('sync_state', ['synced', 'not_synced']) def test_set_sync_state(created_subfolder, sync_state): # pylint:disable=redefined-outer-name folder_with_info = created_subfolder.get(fields='sync_state') created_subfolder.update_sync_state(sync_state) updated_folder_with_info = created_subfolder.get(fields='sync_state') assert folder_with_info.sync_state == 'not_synced' assert updated_folder_with_info.sync_state == sync_state # -*- coding: utf-8 -*- # # Copyright (C) 2005-2009 Edgewall Software # Copyright (C) 2005-2006 Christopher Lenz # Copyright (C) 2005 Jeff Weiss # Copyright (C) 2006 Andres Salomon # All rights reserved. 
# # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. import os, re, types from genshi.core import Markup from trac.core import * from trac.config import Option from trac.db.api import IDatabaseConnector, _parse_db_str from trac.db.util import ConnectionWrapper, IterableCursor from trac.util import as_int, get_pkginfo from trac.util.compat import close_fds from trac.util.text import exception_to_unicode, to_unicode from trac.util.translation import _ _like_escape_re = re.compile(r'([/_%])') try: import MySQLdb import MySQLdb.cursors has_mysqldb = True class MySQLUnicodeCursor(MySQLdb.cursors.Cursor): def _convert_row(self, row): return tuple(v.decode('utf-8') if isinstance(v, str) else v for v in row) def fetchone(self): row = super(MySQLUnicodeCursor, self).fetchone() return self._convert_row(row) if row else None def fetchmany(self, num): rows = super(MySQLUnicodeCursor, self).fetchmany(num) return [self._convert_row(row) for row in rows] \ if rows is not None else [] def fetchall(self): rows = super(MySQLUnicodeCursor, self).fetchall() return [self._convert_row(row) for row in rows] \ if rows is not None else [] except ImportError: has_mysqldb = False # Mapping from "abstract" SQL types to DB-specific types _type_map = { 'int64': 'bigint', } class MySQLConnector(Component): """Database connector for MySQL version 4.1 and greater. Database URLs should be of the form: {{{ mysql://user[:password]@host[:port]/database[?param1=value&param2=value] }}} The following parameters are supported: * `compress`: Enable compression (0 or 1) * `init_command`: Command to run once the connection is created * `named_pipe`: Use a named pipe to connect on Windows (0 or 1) * `read_default_file`: Read default client values from the given file * `read_default_group`: Configuration group to use from the default file * `unix_socket`: Use a Unix socket at the given path to connect """ implements(IDatabaseConnector) mysqldump_path = Option('trac', 'mysqldump_path', 'mysqldump', """Location of mysqldump for MySQL database backups""") def __init__(self): self._version = None self.error = None def get_supported_schemes(self): if not has_mysqldb: self.error = _("Cannot load Python bindings for MySQL") yield ('mysql', -1 if self.error else 1) def get_connection(self, path, log=None, user=None, password=None, host=None, port=None, params={}): cnx = MySQLConnection(path, log, user, password, host, port, params) if not self._version: self._version = get_pkginfo(MySQLdb).get('version', MySQLdb.__version__) mysql_info = 'server: "%s", client: "%s", thread-safe: %s' % \ (cnx.cnx.get_server_info(), MySQLdb.get_client_info(), MySQLdb.thread_safe()) self.env.systeminfo.extend([('MySQL', mysql_info), ('MySQLdb', self._version)]) self.required = True return cnx def get_exceptions(self): return MySQLdb def init_db(self, path, schema=None, log=None, user=None, password=None, host=None, port=None, params={}): cnx = self.get_connection(path, log, user, password, host, port, params) cursor = cnx.cursor() utf8_size = {'utf8': 3, 'utf8mb4': 4}.get(cnx.charset) if schema is None: from trac.db_default import schema for table in schema: for stmt in self.to_sql(table, utf8_size=utf8_size):
self.log.debug(stmt) cursor.execute(stmt) cnx.commit() def _collist(self, table, columns, utf8_size=3): """Take a list of columns and impose limits on each so that indexing works properly. Some Versions of MySQL limit each index prefix to 1000 bytes total, with a max of 767 bytes per column. """ cols = [] limit_col = 767 / utf8_size limit = min(1000 / (utf8_size * len(columns)), limit_col) for c in columns: name = '`%s`' % c table_col = filter((lambda x: x.name == c), table.columns) if len(table_col) == 1 and table_col[0].type.lower() == 'text': if table_col[0].key_size is not None: name += '(%d)' % min(table_col[0].key_size, limit_col) elif name == '`rev`': name += '(20)' elif name == '`path`': name += '(%d)' % limit_col elif name == '`change_type`': name += '(2)' else: name += '(%s)' % limit # For non-text columns, we simply throw away the extra bytes. # That could certainly be optimized better, but for now let's KISS. cols.append(name) return ','.join(cols) def to_sql(self, table, utf8_size=3): sql = ['CREATE TABLE %s (' % table.name] coldefs = [] for column in table.columns: ctype = column.type ctype = _type_map.get(ctype, ctype) if column.auto_increment: ctype = 'INT UNSIGNED NOT NULL AUTO_INCREMENT' # Override the column type, as a text field cannot # use auto_increment. column.type = 'int' coldefs.append(' `%s` %s' % (column.name, ctype)) if len(table.key) > 0: coldefs.append(' PRIMARY KEY (%s)' % self._collist(table, table.key, utf8_size=utf8_size)) sql.append(',\n'.join(coldefs) + '\n)') yield '\n'.join(sql) for index in table.indices: unique = 'UNIQUE' if index.unique else '' yield 'CREATE %s INDEX %s_%s_idx ON %s (%s);' % (unique, table.name, '_'.join(index.columns), table.name, self._collist(table, index.columns, utf8_size=utf8_size)) def alter_column_types(self, table, columns): """Yield SQL statements altering the type of one or more columns of a table. Type changes are specified as a `columns` dict mapping column names to `(from, to)` SQL type tuples. 
""" alterations = [] for name, (from_, to) in sorted(columns.iteritems()): to = _type_map.get(to, to) if to != _type_map.get(from_, from_): alterations.append((name, to)) if alterations: yield "ALTER TABLE %s %s" % (table, ', '.join("MODIFY %s %s" % each for each in alterations)) def backup(self, dest_file): from subprocess import Popen, PIPE db_url = self.env.config.get('trac', 'database') scheme, db_prop = _parse_db_str(db_url) db_params = db_prop.setdefault('params', {}) db_name = os.path.basename(db_prop['path']) args = [self.mysqldump_path] if 'host' in db_prop: args.extend(['-h', db_prop['host']]) if 'port' in db_prop: args.extend(['-P', str(db_prop['port'])]) if 'user' in db_prop: args.extend(['-u', db_prop['user']]) for name, value in db_params.iteritems(): if name == 'compress' and as_int(value, 0): args.append('--compress') elif name == 'named_pipe' and as_int(value, 0): args.append('--protocol=pipe') elif name == 'read_default_file': # Must be first args.insert(1, '--defaults-file=' + value) elif name == 'unix_socket': args.extend(['--protocol=socket', '--socket=' + value]) elif name not in ('init_command', 'read_default_group'): self.log.warning("Invalid connection string parameter '%s'", name) args.extend(['-r', dest_file, db_name]) environ = os.environ.copy() if 'password' in db_prop: environ['MYSQL_PWD'] = str(db_prop['password']) try: p = Popen(args, env=environ, stderr=PIPE, close_fds=close_fds) except OSError, e: raise TracError(_("Unable to run %(path)s: %(msg)s", path=self.mysqldump_path, msg=exception_to_unicode(e))) errmsg = p.communicate()[1] if p.returncode != 0: raise TracError(_("mysqldump failed: %(msg)s", msg=to_unicode(errmsg.strip()))) if not os.path.exists(dest_file): raise TracError(_("No destination file created")) return dest_file class MySQLConnection(ConnectionWrapper): """Connection wrapper for MySQL.""" poolable = True def __init__(self, path, log, user=None, password=None, host=None, port=None, params={}): if path.startswith('/'): path = path[1:] if password == None: password = '' if port == None: port = 3306 opts = {} for name, value in params.iteritems(): if name in ('init_command', 'read_default_file', 'read_default_group', 'unix_socket'): opts[name] = value elif name in ('compress', 'named_pipe'): opts[name] = as_int(value, 0) else: self.log.warning("Invalid connection string parameter '%s'", name) cnx = MySQLdb.connect(db=path, user=user, passwd=password, host=host, port=port, charset='utf8', **opts) if hasattr(cnx, 'encoders'): # 'encoders' undocumented but present since 1.2.1 (r422) cnx.encoders[Markup] = cnx.encoders[types.UnicodeType] cursor = cnx.cursor() cursor.execute("SHOW VARIABLES WHERE " " variable_name='character_set_database'") self.charset = cursor.fetchone()[1] if self.charset != 'utf8': cnx.query("SET NAMES %s" % self.charset) cnx.store_result() ConnectionWrapper.__init__(self, cnx, log) self._is_closed = False def cast(self, column, type): if type == 'int' or type == 'int64': type = 'signed' elif type == 'text': type = 'char' return 'CAST(%s AS %s)' % (column, type) def concat(self, *args): return 'concat(%s)' % ', '.join(args) def like(self): """Return a case-insensitive LIKE clause.""" return "LIKE %%s COLLATE %s_general_ci ESCAPE '/'" % self.charset def like_escape(self, text): return _like_escape_re.sub(r'/\1', text) def quote(self, identifier): """Return the quoted identifier.""" return "`%s`" % identifier.replace('`', '``') def get_last_id(self, cursor, table, column='id'): return cursor.lastrowid def update_sequence(self, 
cursor, table, column='id'): # MySQL handles sequence updates automagically pass def rollback(self): self.cnx.ping() try: self.cnx.rollback() except MySQLdb.ProgrammingError: self._is_closed = True def close(self): if not self._is_closed: try: self.cnx.close() except MySQLdb.ProgrammingError: pass # this error would mean it's already closed. So, ignore self._is_closed = True def cursor(self): return IterableCursor(MySQLUnicodeCursor(self.cnx), self.log) # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import posixpath from file_system import FileSystem, FileNotFoundError from future import Future from test_file_system import _List, _StatTracker, TestFileSystem from path_util import IsDirectory class MockFileSystem(FileSystem): '''Wraps FileSystems to add a selection of mock behaviour: - asserting how often Stat/Read calls are being made to it. - primitive changes/versioning via applying object "diffs", mapping paths to new content (similar to how TestFileSystem works). ''' def __init__(self, file_system): self._file_system = file_system # Updates are stored as TestFileSystems because TestFileSystem already # implements a bunch of logic to interpret paths into dictionaries. self._updates = [] self._stat_tracker = _StatTracker() self._read_count = 0 self._read_resolve_count = 0 self._stat_count = 0 self._version = None @staticmethod def Create(file_system, updates): mock_file_system = MockFileSystem(file_system) for update in updates: mock_file_system.Update(update) return mock_file_system # # FileSystem implementation. # def Read(self, paths, skip_not_found=False): '''Reads |paths| from |_file_system|, then applies the most recent update from |_updates|, if any. ''' self._read_count += 1 def next(result): self._read_resolve_count += 1 for path in result.iterkeys(): update = self._GetMostRecentUpdate(path) if update is not None: result[path] = update return result return self._file_system.Read(paths, skip_not_found=skip_not_found).Then(next) def Refresh(self): return self._file_system.Refresh() def _GetMostRecentUpdate(self, path): '''Returns the latest update for the file at |path|, or None if |path| has never been updated. ''' for update in reversed(self._updates): try: return update.ReadSingle(path).Get() except FileNotFoundError: pass return None def Stat(self, path): self._stat_count += 1 # This only supports numeric stat values since we need to add to it. In # reality the logic here could just be to randomly mutate the stat values # every time there's an Update but that's less meaningful for testing. def stradd(a, b): return str(int(a) + b) stat = self._file_system.Stat(path) stat.version = stradd(stat.version, self._stat_tracker.GetVersion(path)) if stat.child_versions: for child_path, child_version in stat.child_versions.iteritems(): stat.child_versions[child_path] = stradd( stat.child_versions[child_path], self._stat_tracker.GetVersion(posixpath.join(path, child_path))) return stat def GetCommitID(self): return Future(value=str(self._stat_tracker.GetVersion(''))) def GetPreviousCommitID(self): return Future(value=str(self._stat_tracker.GetVersion('') - 1)) def GetIdentity(self): return self._file_system.GetIdentity() def GetVersion(self): return self._version def __str__(self): return repr(self) def __repr__(self): return 'MockFileSystem(read_count=%s, stat_count=%s, updates=%s)' % ( self._read_count, self._stat_count, len(self._updates)) # # Testing methods.
# def GetStatCount(self): return self._stat_count def CheckAndReset(self, stat_count=0, read_count=0, read_resolve_count=0): '''Returns a tuple (success, error). Use in tests like: self.assertTrue(*object_store.CheckAndReset(...)) ''' errors = [] for desc, expected, actual in ( ('read_count', read_count, self._read_count), ('read_resolve_count', read_resolve_count, self._read_resolve_count), ('stat_count', stat_count, self._stat_count)): if actual != expected: errors.append('%s: expected %s got %s' % (desc, expected, actual)) try: return (len(errors) == 0, ', '.join(errors)) finally: self.Reset() def Reset(self): self._read_count = 0 self._read_resolve_count = 0 self._stat_count = 0 def Update(self, update): self._updates.append(TestFileSystem(update)) for path in _List(update).iterkeys(): # Any files (not directories) which changed are now at the version # derived from |_updates|. if not IsDirectory(path): self._stat_tracker.SetVersion(path, len(self._updates)) def SetVersion(self, version): '''Override the reported FileSystem version (default None) for testing.''' self._version = version #!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rax_meta short_description: Manipulate metadata for Rackspace Cloud Servers description: - Manipulate metadata for Rackspace Cloud Servers version_added: 1.7 options: address: description: - Server IP address to modify metadata for, will match any IP assigned to the server id: description: - Server ID to modify metadata for name: description: - Server name to modify metadata for meta: description: - A hash of metadata to associate with the instance author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' - name: Set metadata for a server hosts: all gather_facts: False tasks: - name: Set metadata local_action: module: rax_meta credentials: ~/.raxpub name: "{{ inventory_hostname }}" region: DFW meta: group: primary_group groups: - group_two - group_three app: my_app - name: Clear metadata local_action: module: rax_meta credentials: ~/.raxpub name: "{{ inventory_hostname }}" region: DFW ''' import json try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module from ansible.module_utils.six import string_types def rax_meta(module, address, name, server_id, meta): changed = False cs = pyrax.cloudservers if cs is None: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') search_opts = {} if name: search_opts = dict(name='^%s$' % name) try: servers = cs.servers.list(search_opts=search_opts) except Exception as e: module.fail_json(msg='%s' % e.message) elif address: servers = [] try: for server in cs.servers.list(): for addresses in server.networks.values(): if address in addresses: servers.append(server) break except Exception as e: module.fail_json(msg='%s' % e.message) elif server_id: servers = [] try: servers.append(cs.servers.get(server_id)) except Exception as e: pass if len(servers) > 1: module.fail_json(msg='Multiple servers found matching provided ' 'search parameters') elif not servers: module.fail_json(msg='Failed to find a server matching provided ' 'search parameters') # Normalize and ensure all metadata values are strings for k, v in meta.items(): if isinstance(v, list): meta[k] = ','.join(['%s' % i for i in v]) elif isinstance(v, dict): meta[k] = json.dumps(v) elif not isinstance(v, string_types): meta[k] = '%s' % v server = servers[0] if server.metadata == meta: changed = False else: changed = True removed = set(server.metadata.keys()).difference(meta.keys()) cs.servers.delete_meta(server, list(removed)) cs.servers.set_meta(server, meta) server.get() module.exit_json(changed=changed, meta=server.metadata) def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( address=dict(), id=dict(), name=dict(), meta=dict(type='dict', default=dict()), ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together(), mutually_exclusive=[['address', 'id', 'name']], required_one_of=[['address', 'id', 'name']], ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') address = module.params.get('address') server_id = module.params.get('id') name = module.params.get('name') meta = module.params.get('meta') setup_rax_module(module, pyrax) rax_meta(module, address, name, server_id, meta) if __name__ == '__main__': main() """Unit tests for pkgmgr Just import this package into the python repl. We can eventually convert this to unit tests. 
You can allow run from the command line: python -c "from test_pkgmgr import *" """ DRY_RUN=False import logging logging.basicConfig(level=logging.DEBUG) import tempfile import shutil from pkgmgr import * pkgs = [] django_pkg_js = { "filename": "Django-1.4.10.tar.gz", "name": "Django", "version":"1.4", "pkg_type":"pip", "package_name": "django==1.4.10", "license": {"name":"BSD 3-Clause", "url":"https://github.com/django/django/blob/master/LICENSE"}, "patch_version":"1.4.10", "md5sum": "d324aecc37ce5430f548653b8b1509b6", "cksum": "1774032669" } django = Package.from_json(django_pkg_js) pkgs.append(django) print django.pretty_print() pip_pkg_js = { "filename": "pip-1.5.2.tar.gz", "name": "pip", "version": "1.5", "patch_version":"1.5.2", "pkg_type": "tgz", "source_url": "https://pypi.python.org/packages/source/p/pip/pip-1.5.2.tar.gz#md5=5da30919f732d68b1c666e484e7676f5", "md5sum":"5da30919f732d68b1c666e484e7676f5", "license": {"name":"MIT", "url":"https://github.com/pypa/pip/blob/develop/LICENSE.txt"} } pip = Package.from_json(pip_pkg_js) pkgs.append(pip) print pip.pretty_print() tmpdir = tempfile.mkdtemp() print "Creating %s for tests" % tmpdir tmpdir2 = None try: print "Initial downloads" for p in pkgs: print "Initial download of %s" % p.filename p.download_from_source(tmpdir, dry_run=DRY_RUN) print "Will download everything again, should skip all files" for p in pkgs: print "Re-download %s" % p.filename path = p.download([], tmpdir, dry_run=DRY_RUN) assert os.path.exists(path) tmpdir2 = tempfile.mkdtemp() print "Secondary repository at %s" % tmpdir2 repo = 'file:' + tmpdir print '"Download" from local repository %s' % repo for p in pkgs: path = p.download([repo,], tmpdir2, dry_run=DRY_RUN) assert os.path.exists(path) # test the gathering of packages engage_dir = fixpath(os.path.join(os.path.dirname(fixpath(__file__)), '../../engage')) if not os.path.exists(engage_dir): raise Exception("Could not run gather test - no engage directory at %s" % engage_dir) package_file = os.path.join(tmpdir2, 'packages.json') print "gathering resource files from %s" % engage_dir cnt = gather_package_definitions([engage_dir,], package_file) assert cnt==1, "Expecting 1 packages, got %d" % cnt finally: shutil.rmtree(tmpdir) print "deleted %s" % tmpdir if tmpdir2: shutil.rmtree(tmpdir2) print "deleted %s" % tmpdir2 print "all tests passed" # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for text input preprocessing. May benefit from a fast Cython rewrite. 
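A quick illustrative example (the functions are defined below in this module):

    >>> text_to_word_sequence('Hello, world!')
    ['hello', 'world']

Punctuation listed in `filters` is replaced by the split character before
splitting, so "Hello, world!" becomes two lowercase tokens.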
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import OrderedDict from hashlib import md5 import string import sys import numpy as np from six.moves import range # pylint: disable=redefined-builtin from six.moves import zip # pylint: disable=redefined-builtin if sys.version_info < (3,): maketrans = string.maketrans else: maketrans = str.maketrans def text_to_word_sequence(text, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' '): """Converts a text to a sequence of words (or tokens). Arguments: text: Input text (string). filters: Sequence of characters to filter out. lower: Whether to convert the input to lowercase. split: Sentence split marker (string). Returns: A list of words (or tokens). """ if lower: text = text.lower() text = text.translate(maketrans(filters, split * len(filters))) seq = text.split(split) return [i for i in seq if i] def one_hot(text, n, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' '): return hashing_trick( text, n, hash_function=hash, filters=filters, lower=lower, split=split) def hashing_trick(text, n, hash_function=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' '): """Converts a text to a sequence of indexes in a fixed-size hashing space. Arguments: text: Input text (string). n: Dimension of the hashing space. hash_function: if `None` uses python `hash` function, can be 'md5' or any function that takes in input a string and returns a int. Note that `hash` is not a stable hashing function, so it is not consistent across different runs, while 'md5' is a stable hashing function. filters: Sequence of characters to filter out. lower: Whether to convert the input to lowercase. split: Sentence split marker (string). Returns: A list of integer word indices (unicity non-guaranteed). `0` is a reserved index that won't be assigned to any word. Two or more words may be assigned to the same index, due to possible collisions by the hashing function. """ if hash_function is None: hash_function = hash elif hash_function == 'md5': hash_function = lambda w: int(md5(w.encode()).hexdigest(), 16) seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split) return [(hash_function(w) % (n - 1) + 1) for w in seq] class Tokenizer(object): """Text tokenization utility class. This class allows to vectorize a text corpus, by turning each text into either a sequence of integers (each integer being the index of a token in a dictionary) or into a vector where the coefficient for each token could be binary, based on word count, based on tf-idf... Arguments: num_words: the maximum number of words to keep, based on word frequency. Only the most common `num_words` words will be kept. filters: a string where each element is a character that will be filtered from the texts. The default is all punctuation, plus tabs and line breaks, minus the `'` character. lower: boolean. Whether to convert the texts to lowercase. split: character or string to use for token splitting. char_level: if True, every character will be treated as a token. By default, all punctuation is removed, turning the texts into space-separated sequences of words (words maybe include the `'` character). These sequences are then split into lists of tokens. They will then be indexed or vectorized. `0` is a reserved index that won't be assigned to any word. 
""" def __init__(self, num_words=None, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=' ', char_level=False): self.word_counts = OrderedDict() self.word_docs = {} self.filters = filters self.split = split self.lower = lower self.num_words = num_words self.document_count = 0 self.char_level = char_level def fit_on_texts(self, texts): """Updates internal vocabulary based on a list of texts. Required before using `texts_to_sequences` or `texts_to_matrix`. Arguments: texts: can be a list of strings, or a generator of strings (for memory-efficiency) """ self.document_count = 0 for text in texts: self.document_count += 1 seq = text if self.char_level else text_to_word_sequence( text, self.filters, self.lower, self.split) for w in seq: if w in self.word_counts: self.word_counts[w] += 1 else: self.word_counts[w] = 1 for w in set(seq): if w in self.word_docs: self.word_docs[w] += 1 else: self.word_docs[w] = 1 wcounts = list(self.word_counts.items()) wcounts.sort(key=lambda x: x[1], reverse=True) sorted_voc = [wc[0] for wc in wcounts] # note that index 0 is reserved, never assigned to an existing word self.word_index = dict( list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1))))) self.index_docs = {} for w, c in list(self.word_docs.items()): self.index_docs[self.word_index[w]] = c def fit_on_sequences(self, sequences): """Updates internal vocabulary based on a list of sequences. Required before using `sequences_to_matrix` (if `fit_on_texts` was never called). Arguments: sequences: A list of sequence. A "sequence" is a list of integer word indices. """ self.document_count = len(sequences) self.index_docs = {} for seq in sequences: seq = set(seq) for i in seq: if i not in self.index_docs: self.index_docs[i] = 1 else: self.index_docs[i] += 1 def texts_to_sequences(self, texts): """Transforms each text in texts in a sequence of integers. Only top "num_words" most frequent words will be taken into account. Only words known by the tokenizer will be taken into account. Arguments: texts: A list of texts (strings). Returns: A list of sequences. """ res = [] for vect in self.texts_to_sequences_generator(texts): res.append(vect) return res def texts_to_sequences_generator(self, texts): """Transforms each text in texts in a sequence of integers. Only top "num_words" most frequent words will be taken into account. Only words known by the tokenizer will be taken into account. Arguments: texts: A list of texts (strings). Yields: Yields individual sequences. """ num_words = self.num_words for text in texts: seq = text if self.char_level else text_to_word_sequence( text, self.filters, self.lower, self.split) vect = [] for w in seq: i = self.word_index.get(w) if i is not None: if num_words and i >= num_words: continue else: vect.append(i) yield vect def texts_to_matrix(self, texts, mode='binary'): """Convert a list of texts to a Numpy matrix. Arguments: texts: list of strings. mode: one of "binary", "count", "tfidf", "freq". Returns: A Numpy matrix. """ sequences = self.texts_to_sequences(texts) return self.sequences_to_matrix(sequences, mode=mode) def sequences_to_matrix(self, sequences, mode='binary'): """Converts a list of sequences into a Numpy matrix. Arguments: sequences: list of sequences (a sequence is a list of integer word indices). mode: one of "binary", "count", "tfidf", "freq" Returns: A Numpy matrix. Raises: ValueError: In case of invalid `mode` argument, or if the Tokenizer requires to be fit to sample data. 
""" if not self.num_words: if self.word_index: num_words = len(self.word_index) + 1 else: raise ValueError('Specify a dimension (num_words argument), ' 'or fit on some text data first.') else: num_words = self.num_words if mode == 'tfidf' and not self.document_count: raise ValueError('Fit the Tokenizer on some data ' 'before using tfidf mode.') x = np.zeros((len(sequences), num_words)) for i, seq in enumerate(sequences): if not seq: continue counts = {} for j in seq: if j >= num_words: continue if j not in counts: counts[j] = 1. else: counts[j] += 1 for j, c in list(counts.items()): if mode == 'count': x[i][j] = c elif mode == 'freq': x[i][j] = c / len(seq) elif mode == 'binary': x[i][j] = 1 elif mode == 'tfidf': # Use weighting scheme 2 in # https://en.wikipedia.org/wiki/Tf%E2%80%93idf tf = 1 + np.log(c) idf = np.log(1 + self.document_count / (1 + self.index_docs.get(j, 0))) x[i][j] = tf * idf else: raise ValueError('Unknown vectorization mode:', mode) return x # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility functions specifically for NMT.""" from __future__ import print_function import codecs import time import numpy as np import tensorflow as tf from utils import evaluation_utils from utils import misc_utils as utils __all__ = ["decode_and_evaluate", "get_translation"] def decode_and_evaluate(name, model, sess, trans_file, ref_file, metrics, subword_option, beam_width, tgt_eos, num_translations_per_input=1, decode=True, infer_mode="greedy"): """Decode a test set and compute a score according to the evaluation task.""" # Decode if decode: utils.print_out(" decoding to output %s." % trans_file) start_time = time.time() num_sentences = 0 with codecs.getwriter("utf-8")( tf.gfile.GFile(trans_file, mode="wb")) as trans_f: trans_f.write("") # Write empty string to ensure file is created. 
if infer_mode == "greedy": num_translations_per_input = 1 elif infer_mode == "beam_search": num_translations_per_input = min(num_translations_per_input, beam_width) while True: try: nmt_outputs, _ = model.decode(sess) if infer_mode != "beam_search": nmt_outputs = np.expand_dims(nmt_outputs, 0) batch_size = nmt_outputs.shape[1] num_sentences += batch_size for sent_id in range(batch_size): for beam_id in range(num_translations_per_input): translation = get_translation( nmt_outputs[beam_id], sent_id, tgt_eos=tgt_eos, subword_option=subword_option) trans_f.write((translation + b"\n").decode("utf-8")) except tf.errors.OutOfRangeError: utils.print_time( " done, num sentences %d, num translations per input %d" % (num_sentences, num_translations_per_input), start_time) break # Evaluation evaluation_scores = {} if ref_file and tf.gfile.Exists(trans_file): for metric in metrics: score = evaluation_utils.evaluate( ref_file, trans_file, metric, subword_option=subword_option) evaluation_scores[metric] = score utils.print_out(" %s %s: %.1f" % (metric, name, score)) return evaluation_scores def get_translation(nmt_outputs, sent_id, tgt_eos, subword_option): """Given batch decoding outputs, select a sentence and turn to text.""" if tgt_eos: tgt_eos = tgt_eos.encode("utf-8") # Select a sentence output = nmt_outputs[sent_id, :].tolist() # If there is an eos symbol in outputs, cut them at that point. if tgt_eos and tgt_eos in output: output = output[:output.index(tgt_eos)] if subword_option == "bpe": # BPE translation = utils.format_bpe_text(output) elif subword_option == "spm": # SPM translation = utils.format_spm_text(output) else: translation = utils.format_text(output) return translation # coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.8.2 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class V1GlusterfsVolumeSource(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'endpoints': 'str', 'path': 'str', 'read_only': 'bool' } attribute_map = { 'endpoints': 'endpoints', 'path': 'path', 'read_only': 'readOnly' } def __init__(self, endpoints=None, path=None, read_only=None): """ V1GlusterfsVolumeSource - a model defined in Swagger """ self._endpoints = None self._path = None self._read_only = None self.discriminator = None self.endpoints = endpoints self.path = path if read_only is not None: self.read_only = read_only @property def endpoints(self): """ Gets the endpoints of this V1GlusterfsVolumeSource. EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod :return: The endpoints of this V1GlusterfsVolumeSource. :rtype: str """ return self._endpoints @endpoints.setter def endpoints(self, endpoints): """ Sets the endpoints of this V1GlusterfsVolumeSource. EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod :param endpoints: The endpoints of this V1GlusterfsVolumeSource. 
:type: str """ if endpoints is None: raise ValueError("Invalid value for `endpoints`, must not be `None`") self._endpoints = endpoints @property def path(self): """ Gets the path of this V1GlusterfsVolumeSource. Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod :return: The path of this V1GlusterfsVolumeSource. :rtype: str """ return self._path @path.setter def path(self, path): """ Sets the path of this V1GlusterfsVolumeSource. Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod :param path: The path of this V1GlusterfsVolumeSource. :type: str """ if path is None: raise ValueError("Invalid value for `path`, must not be `None`") self._path = path @property def read_only(self): """ Gets the read_only of this V1GlusterfsVolumeSource. ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod :return: The read_only of this V1GlusterfsVolumeSource. :rtype: bool """ return self._read_only @read_only.setter def read_only(self, read_only): """ Sets the read_only of this V1GlusterfsVolumeSource. ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod :param read_only: The read_only of this V1GlusterfsVolumeSource. :type: bool """ self._read_only = read_only def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, V1GlusterfsVolumeSource): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other #!/usr/bin/env python # # Use the raw transactions API to spend bitcoins received on particular addresses, # and send any change back to that same address. # # Example usage: # spendfrom.py # Lists available funds # spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 # # Assumes it will talk to a bitcoind or Bitcoin-Qt running # on localhost. 
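#
# Overall flow: list unspent outputs over RPC, select enough of them to cover
# amount + fee, build a raw transaction with createrawtransaction, sign it
# with signrawtransaction, and (unless --dry_run is given) broadcast it with
# sendrawtransaction.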
# # Depends on jsonrpc # from decimal import * import getpass import math import os import os.path import platform import sys import time from jsonrpc import ServiceProxy, json BASE_FEE=Decimal("0.001") def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def determine_db_dir(): """Return the default location of the bitcoin data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/Bitcoin/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "Bitcoin") return os.path.expanduser("~/.bitcoin") def read_bitcoin_config(dbdir): """Read the bitcoin.conf file from dbdir, returns dictionary of settings""" from ConfigParser import SafeConfigParser class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[all]\n' def readline(self): if self.sechead: try: return self.sechead finally: self.sechead = None else: s = self.fp.readline() if s.find('#') != -1: s = s[0:s.find('#')].strip() +"\n" return s config_parser = SafeConfigParser() config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf")))) return dict(config_parser.items("all")) def connect_JSON(config): """Connect to a bitcoin JSON-RPC server""" testnet = config.get('testnet', '0') testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False if not 'rpcport' in config: config['rpcport'] = 18332 if testnet else 8332 connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) try: result = ServiceProxy(connect) # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, # but also make sure the bitcoind we're talking to is/isn't testnet: if result.getmininginfo()['testnet'] != testnet: sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") sys.exit(1) return result except: sys.stderr.write("Error connecting to RPC server at "+connect+"\n") sys.exit(1) def unlock_wallet(bitcoind): info = bitcoind.getinfo() if 'unlocked_until' not in info: return True # wallet is not encrypted t = int(info['unlocked_until']) if t <= time.time(): try: passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") bitcoind.walletpassphrase(passphrase, 5) except: sys.stderr.write("Wrong passphrase\n") info = bitcoind.getinfo() return int(info['unlocked_until']) > time.time() def list_available(bitcoind): address_summary = dict() address_to_account = dict() for info in bitcoind.listreceivedbyaddress(0): address_to_account[info["address"]] = info["account"] unspent = bitcoind.listunspent(0) for output in unspent: # listunspent doesn't give addresses, so: rawtx = bitcoind.getrawtransaction(output['txid'], 1) vout = rawtx["vout"][output['vout']] pk = vout["scriptPubKey"] # This code only deals with ordinary pay-to-bitcoin-address # or pay-to-script-hash outputs right now; anything exotic is ignored. 
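        # A decoded scriptPubKey is expected to look roughly like
        #   {"type": "pubkeyhash", "addresses": ["<address>"], ...}
        # both the type filter and the address lookup below rely on that shape.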
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. # outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", 
default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main() #!/usr/bin/python # Copyright 2013 Google Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ''' GCE external inventory script ================================= Generates inventory that Ansible can understand by making API requests Google Compute Engine via the libcloud library. Full install/configuration instructions for the gce* modules can be found in the comments of ansible/test/gce_tests.py. When run against a specific host, this script returns the following variables based on the data obtained from the libcloud Node object: - gce_uuid - gce_id - gce_image - gce_machine_type - gce_private_ip - gce_public_ip - gce_name - gce_description - gce_status - gce_zone - gce_tags - gce_metadata - gce_network When run in --list mode, instances are grouped by the following categories: - zone: zone group name examples are us-central1-b, europe-west1-a, etc. - instance tags: An entry is created for each tag. For example, if you have two instances with a common tag called 'foo', they will both be grouped together under the 'tag_foo' name. - network name: the name of the network is appended to 'network_' (e.g. the 'default' network will result in a group named 'network_default') - machine type types follow a pattern like n1-standard-4, g1-small, etc. - running status: group name prefixed with 'status_' (e.g. status_running, status_stopped,..) - image: when using an ephemeral/scratch disk, this will be set to the image name used when creating the instance (e.g. debian-7-wheezy-v20130816). 
when your instance was created with a root persistent disk it will be set to 'persistent_disk' since there is no current way to determine the image. Examples: Execute uname on all instances in the us-central1-a zone $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance specific information $ plugins/inventory/gce.py --host my_instance Author: Eric Johnson Version: 0.0.1 ''' USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_VERSION="v1" import sys import os import argparse import ConfigParser try: import json except ImportError: import simplejson as json try: from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver _ = Provider.GCE except: print("GCE inventory script requires libcloud >= 0.13") sys.exit(1) class GceInventory(object): def __init__(self): # Read settings and parse CLI arguments self.parse_cli_args() self.driver = self.get_gce_driver() # Just display data for specific host if self.args.host: print self.json_format_dict(self.node_to_dict( self.get_instance(self.args.host))) sys.exit(0) # Otherwise, assume user wants all instances grouped print self.json_format_dict(self.group_instances()) sys.exit(0) def get_gce_driver(self): '''Determine GCE authorization settings and return libcloud driver.''' gce_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) config = ConfigParser.SafeConfigParser() config.read(gce_ini_path) # the GCE params in 'secrets.py' will override these secrets_path = config.get('gce', 'libcloud_secrets') secrets_found = False try: import secrets args = getattr(secrets, 'GCE_PARAMS', ()) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify libcloud secrets file as " err += "/absolute/path/to/secrets.py" print(err) sys.exit(1) sys.path.append(os.path.dirname(secrets_path)) try: import secrets args = getattr(secrets, 'GCE_PARAMS', ()) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found: args = ( config.get('gce','gce_service_account_email_address'), config.get('gce','gce_service_account_pem_file_path') ) kwargs = {'project': config.get('gce','gce_project_id')} gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append("%s/%s" % ( USER_AGENT_PRODUCT, USER_AGENT_VERSION)) return gce def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on GCE') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') self.args = parser.parse_args() def node_to_dict(self, inst): md = {} if inst is None: return {} if inst.extra['metadata'].has_key('items'): for entry in inst.extra['metadata']['items']: md[entry['key']] = entry['value'] net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] return { 'gce_uuid': inst.uuid, 'gce_id': inst.id, 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], 'gce_public_ip': inst.public_ips[0], 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], 'gce_zone': inst.extra['zone'].name, 'gce_tags': 
inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, # Hosts don't have a public name, so we add an IP 'ansible_ssh_host': inst.public_ips[0] } def get_instance(self, instance_name): '''Gets details about a specific instance ''' try: return self.driver.ex_get_node(instance_name) except Exception, e: return None def group_instances(self): '''Group all instances''' groups = {} for node in self.driver.list_nodes(): name = node.name zone = node.extra['zone'].name if groups.has_key(zone): groups[zone].append(name) else: groups[zone] = [name] tags = node.extra['tags'] for t in tags: tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] net = 'network_%s' % net if groups.has_key(net): groups[net].append(name) else: groups[net] = [name] machine_type = node.size if groups.has_key(machine_type): groups[machine_type].append(name) else: groups[machine_type] = [name] image = node.image and node.image or 'persistent_disk' if groups.has_key(image): groups[image].append(name) else: groups[image] = [name] status = node.extra['status'] stat = 'status_%s' % status.lower() if groups.has_key(stat): groups[stat].append(name) else: groups[stat] = [name] return groups def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script GceInventory() #!/usr/bin/env python # -*- coding: utf-8 -*- ''' @author: Travis A. Ebesu @created: 2015-02-22 @summary: ''' # pylint: disable=all class Node(object): def __init__(self, key): self.key = key self.left = None self.right = None self.parent = None def __str__(self): return '{0}'.format(self.key) def __repr__(self): return 'Key: {0:<5}\tParent: {1:<5}\tLeft: {2:<5}\tRight: {3:<5}'.format(self.key, self.parent, self.left, self.right) def _search(node, key): ''' Searches the binary tree for a key Directs it left if smaller and right if bigger Iterative is faster than recursive ''' temp = node prev = temp while temp != None or temp != None and key != temp.key: if key < temp.key: prev = temp temp = temp.left else: prev = temp temp = temp.right if key == prev.key: return prev else: return None def _print_helper(root, indent): ''' @source: http://www.cs.toronto.edu/~rdanek/csc148h_09/lectures/8/bst.py Print the tree rooted at BTNode root. 
    Print str indent (which consists only of whitespace) before the root
    value; indent more for the subtrees so that it looks nice.'''
    if root is not None:
        _print_helper(root.right, indent + "   ")
        print indent + str(root.key)
        _print_helper(root.left, indent + "   ")


class BinaryTree(object):
    def __init__(self):
        self.root = None
        self.size = 0

    def __len__(self):
        return self.size

    @property
    def length(self):
        return self.size

    def max(self, node=None):
        ''' Returns the maximum value in the tree '''
        temp = self.root if node is None else node
        while temp.right is not None:
            temp = temp.right
        return temp

    def min(self, node=None):
        ''' Returns the min value in the tree '''
        temp = self.root if node is None else node
        while temp.left is not None:
            temp = temp.left
        return temp

    def __transplant(self, u, v):
        # Replace the subtree rooted at u with the subtree rooted at v
        # u is the root
        if u.parent is None:
            self.root = v
        # u is a left child (identity check, since u.parent.left may be None)
        elif u is u.parent.left:
            u.parent.left = v
        # u is a right child
        else:
            u.parent.right = v
        if v is not None:
            v.parent = u.parent

    def __delete_node(self, node):
        ''' Deletes a node '''
        # No left child: splice in the right subtree
        if node.left is None:
            self.__transplant(node, node.right)
        # No right child: splice in the left subtree
        elif node.right is None:
            self.__transplant(node, node.left)
        else:
            # Two children: replace node with its successor, the minimum
            # of the right subtree
            temp = self.min(node.right)
            if temp.parent is not node:
                self.__transplant(temp, temp.right)
                temp.right = node.right
                temp.right.parent = temp
            self.__transplant(node, temp)
            temp.left = node.left
            temp.left.parent = temp

    def delete(self, key):
        ''' Deletes a node given a key '''
        node = self.search(key)
        if node is not None:
            self.__delete_node(node)
            self.size -= 1
        else:
            raise KeyError('No such node exists in tree')

    def insert(self, key):
        ''' Inserts a node, left if key < parent else right
            Left has smaller, right has bigger '''
        self.size += 1
        node = Node(key)
        cur = None
        parent = self.root
        while parent is not None:
            cur = parent
            parent = parent.left if node.key < parent.key else parent.right
        node.parent = cur
        if cur is None:
            self.root = node
        elif node.key < cur.key:
            cur.left = node
        else:
            cur.right = node

    def search(self, key):
        ''' Searches for a given element in the tree '''
        return _search(self.root, key)

    def __inorder_tree_walk(self, node):
        ''' prints out the elements in order '''
        if node is not None:
            self.__inorder_tree_walk(node.left)
            print node.key
            self.__inorder_tree_walk(node.right)

    def __str__(self):
        ''' Prints the tree out by depth '''
        s = dict()
        depth = 0

        def recursive(node, depth):
            if node is not None:
                recursive(node.left, depth + 1)
                temp = s.get(depth, None)
                if temp:
                    temp.append(node.key)
                else:
                    temp = [node.key]
                s[depth] = temp
                recursive(node.right, depth + 1)

        recursive(self.root, 1)
        output = []
        for depth in sorted(s.keys()):
            layer = ''
            for v in s[depth]:
                layer += '{0}{1}'.format(' ' * depth, v)
            output.append(layer)
        return '\n'.join(output)

    def print_tree(self):
        ''' source = http://www.cs.toronto.edu/~rdanek/csc148h_09/lectures/8/bst.py
            Print the tree rooted at root.'''
        _print_helper(self.root, "")


if __name__ == '__main__':
    print
    bt = BinaryTree()
    bt.insert(10)
    bt.insert(5)
    bt.insert(3)
    bt.insert(20)
    bt.delete(5)
    bt.print_tree()

# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import cStringIO import struct import dns.exception import dns.inet import dns.rdata import dns.tokenizer class APLItem(object): """An APL list item. @ivar family: the address family (IANA address family registry) @type family: int @ivar negation: is this item negated? @type negation: bool @ivar address: the address @type address: string @ivar prefix: the prefix length @type prefix: int """ __slots__ = ['family', 'negation', 'address', 'prefix'] def __init__(self, family, negation, address, prefix): self.family = family self.negation = negation self.address = address self.prefix = prefix def __str__(self): if self.negation: return "!%d:%s/%s" % (self.family, self.address, self.prefix) else: return "%d:%s/%s" % (self.family, self.address, self.prefix) def to_wire(self, file): if self.family == 1: address = dns.inet.inet_pton(dns.inet.AF_INET, self.address) elif self.family == 2: address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address) else: address = self.address.decode('hex_codec') # # Truncate least significant zero bytes. # last = 0 for i in xrange(len(address) - 1, -1, -1): if address[i] != chr(0): last = i + 1 break address = address[0 : last] l = len(address) assert l < 128 if self.negation: l |= 0x80 header = struct.pack('!HBB', self.family, self.prefix, l) file.write(header) file.write(address) class APL(dns.rdata.Rdata): """APL record. @ivar items: a list of APL items @type items: list of APL_Item @see: RFC 3123""" __slots__ = ['items'] def __init__(self, rdclass, rdtype, items): super(APL, self).__init__(rdclass, rdtype) self.items = items def to_text(self, origin=None, relativize=True, **kw): return ' '.join(map(lambda x: str(x), self.items)) def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): items = [] while 1: token = tok.get().unescape() if token.is_eol_or_eof(): break item = token.value if item[0] == '!': negation = True item = item[1:] else: negation = False (family, rest) = item.split(':', 1) family = int(family) (address, prefix) = rest.split('/', 1) prefix = int(prefix) item = APLItem(family, negation, address, prefix) items.append(item) return cls(rdclass, rdtype, items) from_text = classmethod(from_text) def to_wire(self, file, compress = None, origin = None): for item in self.items: item.to_wire(file) def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None): items = [] while 1: if rdlen < 4: raise dns.exception.FormError header = struct.unpack('!HBB', wire[current : current + 4]) afdlen = header[2] if afdlen > 127: negation = True afdlen -= 128 else: negation = False current += 4 rdlen -= 4 if rdlen < afdlen: raise dns.exception.FormError address = wire[current : current + afdlen] l = len(address) if header[0] == 1: if l < 4: address += '\x00' * (4 - l) address = dns.inet.inet_ntop(dns.inet.AF_INET, address) elif header[0] == 2: if l < 16: address += '\x00' * (16 - l) address = dns.inet.inet_ntop(dns.inet.AF_INET6, address) else: # # This isn't really right according to the RFC, but it # seems better than throwing an exception # address = address.encode('hex_codec') current += afdlen rdlen -= afdlen item = APLItem(header[0], negation, address, header[1]) items.append(item) if rdlen == 0: break return 
cls(rdclass, rdtype, items) from_wire = classmethod(from_wire) def _cmp(self, other): f = cStringIO.StringIO() self.to_wire(f) wire1 = f.getvalue() f.seek(0) f.truncate() other.to_wire(f) wire2 = f.getvalue() f.close() return cmp(wire1, wire2) from __future__ import division, print_function import numpy as np from itertools import product import warnings from sklearn import datasets from sklearn import svm from sklearn import ensemble from sklearn.datasets import make_multilabel_classification from sklearn.random_projection import sparse_random_matrix from sklearn.utils.validation import check_array, check_consistent_length from sklearn.utils.validation import check_random_state from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.metrics import auc from sklearn.metrics import auc_score from sklearn.metrics import average_precision_score from sklearn.metrics import label_ranking_average_precision_score from sklearn.metrics import roc_curve from sklearn.metrics import precision_recall_curve from sklearn.metrics import roc_auc_score from sklearn.metrics.base import UndefinedMetricWarning ############################################################################### # Utilities for testing def make_prediction(dataset=None, binary=False): """Make some classification predictions on a toy dataset using a SVC If binary is True restrict to a binary classification problem instead of a multiclass classification problem """ if dataset is None: # import some data to play with dataset = datasets.load_iris() X = dataset.data y = dataset.target if binary: # restrict to a binary classification task X, y = X[y < 2], y[y < 2] n_samples, n_features = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) X, y = X[p], y[p] half = int(n_samples / 2) # add noisy features to make the problem harder and avoid perfect results rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] # run classifier, get class probabilities and label predictions clf = svm.SVC(kernel='linear', probability=True, random_state=0) probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if binary: # only interested in probabilities of the positive case # XXX: do we really want a special API for the binary case? probas_pred = probas_pred[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] return y_true, y_pred, probas_pred ############################################################################### # Tests def _auc(y_true, y_score): """Alternative implementation to check for correctness of `roc_auc_score`.""" pos_label = np.unique(y_true)[1] # Count the number of times positive samples are correctly ranked above # negative samples. 
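    # AUC equals the probability that a random positive scores higher than a
    # random negative:
    #   AUC = (1 / (n_pos * n_neg)) * sum_{i,j} 1[pos_i > neg_j]
    # which is exactly what the pairwise difference matrix below counts.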
pos = y_score[y_true == pos_label] neg = y_score[y_true != pos_label] diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1) n_correct = np.sum(diff_matrix > 0) return n_correct / float(len(pos) * len(neg)) def _average_precision(y_true, y_score): """Alternative implementation to check for correctness of `average_precision_score`.""" pos_label = np.unique(y_true)[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_score = y_score[order] y_true = y_true[order] score = 0 for i in range(len(y_score)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec return score / n_pos def test_roc_curve(): """Test Area under Receiver Operating Characteristic (ROC) curve""" y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred) roc_auc = auc(fpr, tpr) expected_auc = _auc(y_true, probas_pred) assert_array_almost_equal(roc_auc, expected_auc, decimal=2) assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred)) assert_almost_equal(roc_auc, ignore_warnings(auc_score)(y_true, probas_pred)) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_end_points(): # Make sure that roc_curve returns a curve start at 0 and ending and # 1 even in corner cases rng = np.random.RandomState(0) y_true = np.array([0] * 50 + [1] * 50) y_pred = rng.randint(3, size=100) fpr, tpr, thr = roc_curve(y_true, y_pred) assert_equal(fpr[0], 0) assert_equal(fpr[-1], 1) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thr.shape) def test_roc_returns_consistency(): """Test whether the returned threshold matches up with tpr""" # make small toy dataset y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred) # use the given thresholds to determine the tpr tpr_correct = [] for t in thresholds: tp = np.sum((probas_pred >= t) & y_true) p = np.sum(y_true) tpr_correct.append(1.0 * tp / p) # compare tpr and tpr_correct to see if the thresholds' order was correct assert_array_almost_equal(tpr, tpr_correct, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_nonrepeating_thresholds(): """Test to ensure that we don't return spurious repeating thresholds. Duplicated thresholds can arise due to machine precision issues. """ dataset = datasets.load_digits() X = dataset['data'] y = dataset['target'] # This random forest classifier can only return probabilities # significant to two decimal places clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0) # How well can the classifier predict whether a digit is less than 5? 
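    # (Summing the predicted probabilities of classes 0-4 collapses the
    # multiclass output into a single score for this derived binary task.)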
# This task contributes floating point roundoff errors to the probabilities train, test = slice(None, None, 2), slice(1, None, 2) probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test]) y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here y_true = [yy < 5 for yy in y[test]] # Check for repeating values in the thresholds fpr, tpr, thresholds = roc_curve(y_true, y_score) assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size) def test_roc_curve_multi(): """roc_curve not applicable for multi-class problems""" y_true, _, probas_pred = make_prediction(binary=False) assert_raises(ValueError, roc_curve, y_true, probas_pred) def test_roc_curve_confidence(): """roc_curve for confidence scores""" y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.90, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_hard(): """roc_curve for hard decisions""" y_true, pred, probas_pred = make_prediction(binary=True) # always predict one trivial_pred = np.ones(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # always predict zero trivial_pred = np.zeros(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # hard decisions fpr, tpr, thresholds = roc_curve(y_true, pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.78, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_one_label(): y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # assert there are warnings w = UndefinedMetricWarning fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred) # all true labels, all fpr should be nan assert_array_equal(fpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # assert there are warnings fpr, tpr, thresholds = assert_warns(w, roc_curve, [1 - x for x in y_true], y_pred) # all negative labels, all tpr should be nan assert_array_equal(tpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_toydata(): # Binary classification y_true = [0, 1] y_score = [0, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) y_true = [0, 1] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1, 1]) assert_array_almost_equal(fpr, [0, 0, 1]) assert_almost_equal(roc_auc, 0.) y_true = [1, 0] y_score = [1, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) y_true = [1, 0] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) 
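    # Tied scores: with a single distinct threshold both samples flip
    # together, so the curve jumps straight from (0, 0) to (1, 1) and the
    # AUC is 0.5.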
y_true = [1, 0] y_score = [0.5, 0.5] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, .5) y_true = [0, 0] y_score = [0.25, 0.75] tpr, fpr, _ = roc_curve(y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [0., 0.5, 1.]) assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan]) y_true = [1, 1] y_score = [0.25, 0.75] tpr, fpr, _ = roc_curve(y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [np.nan, np.nan]) assert_array_almost_equal(fpr, [0.5, 1.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.) y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5) def test_auc(): """Test Area Under Curve (AUC) computation""" x = [0, 1] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0, 0] y = [0, 1, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [0, 1] y = [1, 1] assert_array_almost_equal(auc(x, y), 1) x = [0, 0.5, 1] y = [0, 0.5, 1] assert_array_almost_equal(auc(x, y), 0.5) def test_auc_duplicate_values(): # Test Area Under Curve (AUC) computation with duplicate values # auc() was previously sorting the x and y arrays according to the indices # from numpy.argsort(x), which was reordering the tied 0's in this example # and resulting in an incorrect area computation. This test detects the # error. 
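    # With reorder=True, ties on x are broken by sorting on y as well, so the
    # three points at x == 0 contribute zero width; the area is then the
    # trapezoid from x=-2 to x=0 (2.0) plus the one from x=0 to x=1 (1.0),
    # i.e. 3.0 for each of y1, y2 and y3.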
    x = [-2.0, 0.0, 0.0, 0.0, 1.0]
    y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
    y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
    y3 = [2.0, 1.0, 0.5, 0.0, 1.0]

    for y in (y1, y2, y3):
        assert_array_almost_equal(auc(x, y, reorder=True), 3.0)


def test_auc_errors():
    # Incompatible shapes
    assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])

    # Too few x values
    assert_raises(ValueError, auc, [0.0], [0.1])

    # x is not in order
    assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])


def test_auc_score_non_binary_class():
    """Test that roc_auc_score function returns an error when trying
    to compute AUC for non-binary class values.
    """
    rng = check_random_state(404)
    y_pred = rng.rand(10)
    # y_true contains only one class value
    y_true = np.zeros(10, dtype="int")
    assert_raise_message(ValueError, "ROC AUC score is not defined",
                         roc_auc_score, y_true, y_pred)
    y_true = np.ones(10, dtype="int")
    assert_raise_message(ValueError, "ROC AUC score is not defined",
                         roc_auc_score, y_true, y_pred)
    y_true = -np.ones(10, dtype="int")
    assert_raise_message(ValueError, "ROC AUC score is not defined",
                         roc_auc_score, y_true, y_pred)
    # y_true contains three different class values
    y_true = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         roc_auc_score, y_true, y_pred)

    with warnings.catch_warnings(record=True):
        rng = check_random_state(404)
        y_pred = rng.rand(10)
        # y_true contains only one class value
        y_true = np.zeros(10, dtype="int")
        assert_raise_message(ValueError, "ROC AUC score is not defined",
                             roc_auc_score, y_true, y_pred)
        y_true = np.ones(10, dtype="int")
        assert_raise_message(ValueError, "ROC AUC score is not defined",
                             roc_auc_score, y_true, y_pred)
        y_true = -np.ones(10, dtype="int")
        assert_raise_message(ValueError, "ROC AUC score is not defined",
                             roc_auc_score, y_true, y_pred)
        # y_true contains three different class values
        y_true = rng.randint(0, 3, size=10)
        assert_raise_message(ValueError, "multiclass format is not supported",
                             roc_auc_score, y_true, y_pred)


def test_precision_recall_curve():
    y_true, _, probas_pred = make_prediction(binary=True)
    _test_precision_recall_curve(y_true, probas_pred)

    # Use {-1, 1} for labels; make sure original labels aren't modified
    y_true[np.where(y_true == 0)] = -1
    y_true_copy = y_true.copy()
    _test_precision_recall_curve(y_true, probas_pred)
    assert_array_equal(y_true_copy, y_true)

    labels = [1, 0, 0, 1]
    predict_probas = [1, 2, 3, 4]
    p, r, t = precision_recall_curve(labels, predict_probas)
    assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
    assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
    assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
    assert_equal(p.size, r.size)
    assert_equal(p.size, t.size + 1)


def test_precision_recall_curve_pos_label():
    y_true, _, probas_pred = make_prediction(binary=False)
    pos_label = 2
    p, r, thresholds = precision_recall_curve(y_true,
                                              probas_pred[:, pos_label],
                                              pos_label=pos_label)
    p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
                                                 probas_pred[:, pos_label])
    assert_array_almost_equal(p, p2)
    assert_array_almost_equal(r, r2)
    assert_array_almost_equal(thresholds, thresholds2)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)


def _test_precision_recall_curve(y_true, probas_pred):
    """Test Precision-Recall and area under the PR curve"""
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.85, 2)
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred), precision_recall_auc, 1) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) # Smoke test in the case of proba having only one value p, r, thresholds = precision_recall_curve(y_true, np.zeros_like(probas_pred)) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.75, 3) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def test_precision_recall_curve_errors(): # Contains non-binary labels assert_raises(ValueError, precision_recall_curve, [0, 1, 2], [[0.0], [1.0], [1.0]]) def test_precision_recall_curve_toydata(): with np.errstate(all="raise"): # Binary classification y_true = [0, 1] y_score = [0, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [0, 1] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 0., 1.]) assert_array_almost_equal(r, [1., 0., 0.]) assert_almost_equal(auc_prc, 0.25) y_true = [1, 0] y_score = [1, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1., 0]) assert_almost_equal(auc_prc, .75) y_true = [1, 0] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [1, 0] y_score = [0.5, 0.5] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1, 0.]) assert_almost_equal(auc_prc, .75) y_true = [0, 0] y_score = [0.25, 0.75] assert_raises(Exception, precision_recall_curve, y_true, y_score) assert_raises(Exception, average_precision_score, y_true, y_score) y_true = [1, 1] y_score = [0.25, 0.75] p, r, _ = precision_recall_curve(y_true, y_score) assert_almost_equal(average_precision_score(y_true, y_score), 1.) assert_array_almost_equal(p, [1., 1., 1.]) assert_array_almost_equal(r, [1, 0.5, 0.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 1.) 
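        # For the next case the per-sample average precisions are 1.
        # (perfect ordering) and 0.25 (reversed ordering, cf. the binary
        # toy case above), so average="samples" yields
        # (1. + 0.25) / 2 = 0.625.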
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.625)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.625)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.25)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.75)


def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant
    # under scaling or shifting of the probabilities.
    y_true, _, probas_pred = make_prediction(binary=True)

    roc_auc = roc_auc_score(y_true, probas_pred)
    roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
    roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_scaled)
    assert_equal(roc_auc, roc_auc_shifted)

    f = ignore_warnings(auc_score)
    roc_auc = f(y_true, probas_pred)
    roc_auc_scaled = f(y_true, 100 * probas_pred)
    roc_auc_shifted = f(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_scaled)
    assert_equal(roc_auc, roc_auc_shifted)

    pr_auc = average_precision_score(y_true, probas_pred)
    pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
    pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
    assert_equal(pr_auc, pr_auc_scaled)
    assert_equal(pr_auc, pr_auc_shifted)


def check_lrap_toy(lrap_score):
    """Check on several small examples that it works"""
    assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)

    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 1) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 2) / 2)

    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)

    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
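    # In the checks above and below, the expected value follows the LRAP
    # definition: average, over the relevant labels, of
    # (number of relevant labels ranked at or above this label) / (its rank),
    # with ranks counted from 1 on the decreasingly sorted scores.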
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)

    # Tie handling
    assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)

    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)

    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
    assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
                        3 / 4)


def check_zero_or_all_relevant_labels(lrap_score):
    random_state = check_random_state(0)

    for n_labels in range(2, 5):
        y_score = random_state.uniform(size=(1, n_labels))
        y_score_ties = np.zeros_like(y_score)

        # No relevant labels
        y_true = np.zeros((1, n_labels))
        assert_equal(lrap_score(y_true, y_score), 1.)
        assert_equal(lrap_score(y_true, y_score_ties), 1.)

        # Only relevant labels
        y_true = np.ones((1, n_labels))
        assert_equal(lrap_score(y_true, y_score), 1.)
        assert_equal(lrap_score(y_true, y_score_ties), 1.)

    # Degenerate case: only one label
    assert_almost_equal(lrap_score([[1], [0], [1], [0]],
                                   [[0.5], [0.5], [0.5], [0.5]]), 1.)
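# A worked illustration of the tie handling exercised above (a sketch, not
# part of the original suite; it assumes the sklearn imports used elsewhere
# in this module, and the helper name is invented for illustration): with
# y_true = [[1, 0, 1]] and scores [[0.25, 0.5, 0.5]], the two tied 0.5
# scores both receive the worst rank of their tie group (rank 2) and the
# 0.25 score receives rank 3, so the relevant labels contribute 1 / 2 and
# 2 / 3, matching the corresponding check_lrap_toy entry.
def _lrap_tie_handling_example():
    value = label_ranking_average_precision_score([[1, 0, 1]],
                                                  [[0.25, 0.5, 0.5]])
    assert_almost_equal(value, (2 / 3 + 1 / 2) / 2)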
def check_lrap_error_raised(lrap_score):
    # Raise ValueError if the input is not in an appropriate format
    assert_raises(ValueError, lrap_score,
                  [0, 1, 0], [0.25, 0.3, 0.2])
    assert_raises(ValueError, lrap_score, [0, 1, 2],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    assert_raises(ValueError, lrap_score, [(0), (1), (2)],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])

    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])


def check_lrap_only_ties(lrap_score):
    """Check tie handling in score"""
    # Basic check with only ties and increasing label space
    for n_labels in range(2, 10):
        y_score = np.ones((1, n_labels))

        # Check for growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                assert_almost_equal(lrap_score(y_true, y_score),
                                    n_relevant / n_labels)


def check_lrap_without_tie_and_increasing_score(lrap_score):
    """Check that label ranking average precision works for various
    label space sizes, without ties and with decreasing scores
    """
    # Basic check with increasing label space size and decreasing score
    for n_labels in range(2, 10):
        y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)

        # First and last
        y_true = np.zeros((1, n_labels))
        y_true[0, 0] = 1
        y_true[0, -1] = 1
        assert_almost_equal(lrap_score(y_true, y_score),
                            (2 / n_labels + 1) / 2)

        # Check for growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                assert_almost_equal(lrap_score(y_true, y_score),
                                    sum((r + 1) / ((pos + r + 1) *
                                                   n_relevant)
                                        for r in range(n_relevant)))


def _my_lrap(y_true, y_score):
    """Simple implementation of label ranking average precision"""
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    n_samples, n_labels = y_true.shape
    score = np.empty((n_samples, ))
    for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
        unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
        n_ranks = unique_rank.size
        rank = n_ranks - inv_rank

        # Ranks need to be corrected to take ties into account: e.g. two
        # labels tied for rank 1 both get rank 2.
        corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
        rank = corr_rank[rank]

        relevant = y_true[i].nonzero()[0]
        if relevant.size == 0 or relevant.size == n_labels:
            score[i] = 1
            continue

        score[i] = 0.
        for label in relevant:
            # Let's count the number of relevant labels with a better or
            # equal (i.e. smaller or equal) rank.
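            # ``rank`` was tie-corrected above, so tied relevant labels
            # share a rank and count each other in this comparison.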
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant) # Weight by the rank of the actual label score[i] += n_ranked_above / rank[label] score[i] /= relevant.size return score.mean() def check_alternative_lrap_implementation(lrap_score, n_classes=5, n_samples=20, random_state=0): _, y_true = make_multilabel_classification(n_features=1, allow_unlabeled=False, return_indicator=True, random_state=random_state, n_classes=n_classes, n_samples=n_samples) # Score with ties y_score = sparse_random_matrix(n_components=y_true.shape[0], n_features=y_true.shape[1], random_state=random_state) if hasattr(y_score, "toarray"): y_score = y_score.toarray() score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) # Uniform score random_state = check_random_state(random_state) y_score = random_state.uniform(size=(n_samples, n_classes)) score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) def test_label_ranking_avp(): for fn in [label_ranking_average_precision_score, _my_lrap]: yield check_lrap_toy, fn yield check_lrap_without_tie_and_increasing_score, fn yield check_lrap_only_ties, fn yield check_zero_or_all_relevant_labels, fn yield check_lrap_error_raised, label_ranking_average_precision_score for n_samples, n_classes, random_state in product((1, 2, 8, 20), (2, 5, 10), range(1)): yield (check_alternative_lrap_implementation, label_ranking_average_precision_score, n_classes, n_samples, random_state) # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.service_client import ServiceClient from msrest import Configuration, Serializer, Deserializer from .version import VERSION from .operations.bool_model_operations import BoolModelOperations from . import models class AutoRestBoolTestServiceConfiguration(Configuration): """Configuration for AutoRestBoolTestService Note that all parameters used to create this instance are saved as instance attributes. :param str base_url: Service URL :param str filepath: Existing config """ def __init__( self, base_url=None, filepath=None): if not base_url: base_url = 'http://localhost' super(AutoRestBoolTestServiceConfiguration, self).__init__(base_url, filepath) self.add_user_agent('autorestbooltestservice/{}'.format(VERSION)) class AutoRestBoolTestService(object): """Test Infrastructure for AutoRest :ivar config: Configuration for client. 
:vartype config: AutoRestBoolTestServiceConfiguration :ivar bool_model: BoolModel operations :vartype bool_model: .operations.BoolModelOperations :param str base_url: Service URL :param str filepath: Existing config """ def __init__( self, base_url=None, filepath=None): self.config = AutoRestBoolTestServiceConfiguration(base_url, filepath) self._client = ServiceClient(None, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self.bool_model = BoolModelOperations( self._client, self.config, self._serialize, self._deserialize) #!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Checks that files whose file case changes get rebuilt correctly. """ import os import TestGyp test = TestGyp.TestGyp() CHDIR = 'filecase' test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', test.ALL, chdir=CHDIR) os.rename('filecase/file.c', 'filecase/fIlE.c') test.write('filecase/test.gyp', test.read('filecase/test.gyp').replace('file.c', 'fIlE.c')) test.run_gyp('test.gyp', chdir=CHDIR) test.build('test.gyp', test.ALL, chdir=CHDIR) # Check that having files that differ just in their case still work on # case-sensitive file systems. test.write('filecase/FiLe.c', 'int f(); int main() { return f(); }') test.write('filecase/fIlE.c', 'int f() { return 42; }') is_case_sensitive = test.read('filecase/FiLe.c') != test.read('filecase/fIlE.c') if is_case_sensitive: test.run_gyp('test-casesensitive.gyp', chdir=CHDIR) test.build('test-casesensitive.gyp', test.ALL, chdir=CHDIR) test.pass_test() #!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, Brian Coca # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'curated'} DOCUMENTATION = ''' --- version_added: "1.2" module: jabber short_description: Send a message to jabber user or chat room description: - Send a message to jabber options: user: description: - User as which to connect required: true password: description: - password for user to connect required: true to: description: - user ID or name of the room, when using room use a slash to indicate your nick. required: true msg: description: - The message body. 
    required: true
    default: null
  host:
    description:
      - host to connect, overrides user info
    required: false
  port:
    description:
      - port to connect to, overrides default
    required: false
    default: 5222
  encoding:
    description:
      - message encoding
    required: false

# informational: requirements for nodes
requirements:
  - python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''

EXAMPLES = '''
# send a message to a user
- jabber:
    user: mybot@example.net
    password: secret
    to: friend@example.net
    msg: Ansible task finished

# send a message to a room
- jabber:
    user: mybot@example.net
    password: secret
    to: mychaps@conference.example.net/ansiblebot
    msg: Ansible task finished

# send a message, specifying the host and port
- jabber:
    user: mybot@example.net
    host: talk.example.net
    port: 5223
    password: secret
    to: mychaps@example.net
    msg: Ansible task finished
'''

import os
import re
import time

HAS_XMPP = True
try:
    import xmpp
except ImportError:
    HAS_XMPP = False


def main():

    module = AnsibleModule(
        argument_spec=dict(
            user=dict(required=True),
            password=dict(required=True, no_log=True),
            to=dict(required=True),
            msg=dict(required=True),
            host=dict(required=False),
            port=dict(required=False, default=5222),
            encoding=dict(required=False),
        ),
        supports_check_mode=True
    )

    if not HAS_XMPP:
        module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")

    jid = xmpp.JID(module.params['user'])
    user = jid.getNode()
    server = jid.getDomain()
    port = module.params['port']
    password = module.params['password']
    try:
        to, nick = module.params['to'].split('/', 1)
    except ValueError:
        to, nick = module.params['to'], None

    if module.params['host']:
        host = module.params['host']
    else:
        host = server
    if module.params['encoding']:
        xmpp.simplexml.ENCODING = module.params['encoding']

    msg = xmpp.protocol.Message(body=module.params['msg'])

    try:
        conn = xmpp.Client(server, debug=[])
        if not conn.connect(server=(host, port)):
            module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
        if not conn.auth(user, password, 'Ansible'):
            module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
        # some old servers require this, also the sleep following send
        conn.sendInitPresence(requestRoster=0)

        if nick:  # sending to room instead of user, need to join
            msg.setType('groupchat')
            msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
            conn.send(xmpp.Presence(to=module.params['to']))
            time.sleep(1)
        else:
            msg.setType('chat')

        msg.setTo(to)
        if not module.check_mode:
            conn.send(msg)
            time.sleep(1)
        conn.disconnect()
    except Exception:
        e = get_exception()
        module.fail_json(msg="unable to send msg: %s" % e)

    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception

if __name__ == '__main__':
    main()
#!/usr/bin/env python
# encoding: utf-8
"""
consolegrid_provider.py

Created by Scott on 2013-12-26.
Copyright (c) 2013 Scott Rice. All rights reserved.
""" import sys import os import urllib import urllib2 import grid_image_provider from ice.logs import logger class ConsoleGridProvider(grid_image_provider.GridImageProvider): @staticmethod def api_url(): return "http://consolegrid.com/api/top_picture" @staticmethod def is_enabled(): # TODO: Return True/False based on the current network status return True def consolegrid_top_picture_url(self, rom): host = self.api_url() quoted_name = urllib.quote(rom.name) return "%s?console=%s&game=%s" % (host, rom.console.shortname, quoted_name) def find_url_for_rom(self, rom): """ Determines a suitable grid image for a given ROM by hitting ConsoleGrid.com """ try: response = urllib2.urlopen(self.consolegrid_top_picture_url(rom)) if response.getcode() == 204: name = rom.name console = rom.console.fullname logger.debug( "ConsoleGrid has no game called `%s` for %s" % (name, console) ) else: return response.read() except urllib2.URLError as error: # Connection was refused. ConsoleGrid may be down, or something bad # may have happened logger.debug( "No image was downloaded due to an error with ConsoleGrid" ) def download_image(self, url): """ Downloads the image at 'url' and returns the path to the image on the local filesystem """ (path, headers) = urllib.urlretrieve(url) return path def image_for_rom(self, rom): image_url = self.find_url_for_rom(rom) if image_url is None or image_url == "": return None return self.download_image(image_url) #!/usr/bin/env python # # Check trace components in FreeType 2 source. # Author: suzuki toshiya, 2009, 2013 # # This code is explicitly into the public domain. import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [ "src" ] TRACE_DEF_FILES = [ "include/internal/fttrace.h" ] # -------------------------------------------------------------- # Parse command line options # for i in range( 1, len( sys.argv ) ): if sys.argv[i].startswith( "--help" ): print "Usage: %s [option]" % sys.argv[0] print "Search used-but-defined and defined-but-not-used trace_XXX macros" print "" print " --help:" print " Show this help" print "" print " --src-dirs=dir1:dir2:..." print " Specify the directories of C source files to be checked" print " Default is %s" % ":".join( SRC_FILE_DIRS ) print "" print " --def-files=file1:file2:..." print " Specify the header files including FT_TRACE_DEF()" print " Default is %s" % ":".join( TRACE_DEF_FILES ) print "" exit(0) if sys.argv[i].startswith( "--src-dirs=" ): SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" ) elif sys.argv[i].startswith( "--def-files=" ): TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" ) # -------------------------------------------------------------- # Scan C source and header files using trace macros. 
# c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE ) trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' ) for d in SRC_FILE_DIRS: for ( p, dlst, flst ) in os.walk( d ): for f in flst: if c_pathname_pat.match( f ) != None: src_pathname = os.path.join( p, f ) line_num = 0 for src_line in open( src_pathname, 'r' ): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match( src_line ) != None: component_name = trace_use_pat.sub( '', src_line ) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) ) else: USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' ) trace_def_pat_cls = re.compile( '[ \t\)].*$' ) for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open( f, 'r' ): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match( hdr_line ) != None: component_name = trace_def_pat_opn.sub( '', hdr_line ) component_name = trace_def_pat_cls.sub( '', component_name ) if component_name in KNOWN_COMPONENT: print "trace component %s is defined twice, see %s and fttrace.h:%d" % \ ( component_name, KNOWN_COMPONENT[component_name], line_num ) else: KNOWN_COMPONENT[component_name] = "%s:%d" % \ ( os.path.basename( f ), line_num ) # -------------------------------------------------------------- # Compare the used and defined trace macros. # print "# Trace component used in the implementations but not defined in fttrace.h." cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) ) print "# Trace component is defined but not used in the implementations." cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if c != "any": print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] ) #!/usr/bin/python import settestpath # lots of useful util methods for building/tearing down # test enviroments... 
import testutils

from up2date_client import config
from up2date_client import up2dateUtils

import unittest

test_up2date = "etc-sysconfig-rhn/up2date"


class TestConfig(unittest.TestCase):
    def setUp(self):
        # in this stuff, we get weird stuff existing, so restore
        # a config first, then change anything test specific
        testutils.restoreConfig()
        self.__setupData()

    def __setupData(self):
        pass

    def tearDown(self):
        config.cfg = None
        testutils.restoreConfig()

    def testEmptyInit(self):
        "Verify that the class can be created with no arguments"
        cfg = config.initUp2dateConfig(test_up2date)

    def testConfigString(self):
        "Verify that Config loads a string as a string"
        cfg = config.initUp2dateConfig(test_up2date)
        assert isinstance(cfg['systemIdPath'], basestring)

    def testConfigListSingleItem(self):
        "Verify that Config loads a list of one as a list"
        cfg = config.initUp2dateConfig(test_up2date)
        assert type(cfg['pkgSkipList']) == type([])

    def testConfigList(self):
        "Verify that Config loads a list as a list"
        cfg = config.initUp2dateConfig(test_up2date)
        assert type(cfg['disallowConfChanges']) == type([])

    def testConfigBool(self):
        "Verify that Config loads a bool int as a bool"
        cfg = config.initUp2dateConfig(test_up2date)
        assert type(cfg['enableProxy']) == type(1)

    def testConfigSave(self):
        "Verify that Config saves a file without error"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg.save()

    def testConfigSetItem(self):
        "Verify that Config.__setitem__ works"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['blippyfoobarbazblargh'] = 1
        assert cfg['blippyfoobarbazblargh'] == 1

    def testConfigInfo(self):
        "Verify that Config.info() runs without error"
        cfg = config.initUp2dateConfig(test_up2date)
        blargh = cfg.info('enableProxy')

    def testConfigRuntimeStore(self):
        "Verify that values Config['value'] are set for runtime only and not saved"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['blippy12345'] = "wantafreehat?"
        cfg.save()

        # cfg is a fairly persistent singleton, blow it away to get a new reference
        del config.cfg

        cfg2 = config.initUp2dateConfig(test_up2date)
        # if this returns a value, it means we saved the config file...
        assert cfg2['blippy12345'] == None

    def testConfigRuntimeStoreNoDir(self):
        "Verify that saving a file into a non-existent dir works"
        # bugzilla: 125179
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['blippy321'] = "blumblim"
        cfg.save()

    def testConfigKeysReturnsAList(self):
        "Verify that Config.keys() returns a list"
        cfg = config.initUp2dateConfig(test_up2date)
        blip = cfg.keys()
        assert type(blip) == type([])

    def testConfigKeys(self):
        "Verify that Config.keys() returns a list with the right stuff"
        cfg = config.initUp2dateConfig(test_up2date)
        blip = cfg.keys()
        assert "enableProxy" in blip

    def testConfigHasKeyDoesntExist(self):
        "Verify that Config.has_key() is correct on non-existent keys"
        cfg = config.initUp2dateConfig(test_up2date)
        assert cfg.has_key("234wfj34ruafho34rhkfe") == 0

    def testConfigHasKeyDoesExist(self):
        "Verify that Config.has_key() is correct on existing keys"
        cfg = config.initUp2dateConfig(test_up2date)
        assert cfg.has_key("enableProxy") == 1

    def testConfigHasKeyRuntime(self):
        "Verify that Config.has_key() is correct for runtime keys"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['runtimekey'] = "blippy"
        assert cfg.has_key('runtimekey') == 1

    def testConfigValues(self):
        "Verify that Config.values() runs without error"
        cfg = config.initUp2dateConfig(test_up2date)
        ret = cfg.values()
        assert type(ret) == type([])

    def testConfigItems(self):
        "Verify that Config.items() runs without error"
        cfg = config.initUp2dateConfig(test_up2date)
        ret = cfg.items()
        assert type(ret) == type([])

    def testConfigSet(self):
        "Verify that Config.set() sets items into the persistent layer"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg.set("permItem", 1)
        assert cfg.stored["permItem"] == 1

    def testConfigSetOverride(self):
        "Verify that Config.set() sets items in the persistent layer, overriding runtime"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg['semiPermItem'] = 1
        cfg.set('semiPermItem', 0)
        assert cfg.stored['semiPermItem'] == 0

    def testConfigLoad(self):
        "Verify that Config.load() works without exception"
        cfg = config.initUp2dateConfig(test_up2date)
        cfg.load("/etc/sysconfig/rhn/up2date")

    def testNetworkConfig(self):
        "Verify that the NetworkConfig class can be created"
        nc = config.NetworkConfig()

    def testNetworkConfigLoad(self):
        "Verify that NetworkConfig.load() runs without error"
        nc = config.NetworkConfig()
        nc.load()

    def testNetworkConfigLoadCorrectness(self):
        "Verify that NetworkConfig.load() runs and gets the right info"
        testutils.setupConfig("fc2-rpmmd-sources-1")
        nc = config.NetworkConfig()
        nc.load()
        assert nc['blargh'] == "blippyfoo"

    def testNetworkConfigLoadCorrectnessOverrides(self):
        "Verify that NetworkConfig.load() runs and overrides the default value"
        testutils.setupConfig("fc2-rpmmd-sources-1")
        nc = config.NetworkConfig()
        nc.load()
        assert nc['serverURL'] == "http://www.hokeypokeyland.com/XMLRPC"


class TestGetProxySetting(unittest.TestCase):
    def setUp(self):
        self.cfg = config.initUp2dateConfig(test_up2date)
        self.proxy1 = "http://proxy.company.com:8080"
        self.proxy2 = "proxy.company.com:8080"

    def testHttpSpecified(self):
        "Verify that http:// gets stripped from proxy settings"
        self.cfg['httpProxy'] = self.proxy1
        res = up2dateUtils.getProxySetting()
        assert res == "proxy.company.com:8080"

    def testHttpUnSpecified(self):
        "Verify that proxies with no http:// work correctly"
        self.cfg['httpProxy'] = self.proxy2
        res = up2dateUtils.getProxySetting()
        assert res == "proxy.company.com:8080"


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestConfig))
suite.addTest(unittest.makeSuite(TestGetProxySetting)) return suite if __name__ == "__main__": unittest.main(defaultTest="suite") #!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Matt Wright # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: pip short_description: Manages Python library dependencies. description: - "Manage Python library dependencies. To use this module, one of the following keys is required: C(name) or C(requirements)." version_added: "0.7" options: name: description: - The name of a Python library to install or the url of the remote package. - As of 2.2 you can supply a list of names. required: false default: null version: description: - The version number to install of the Python library specified in the I(name) parameter required: false default: null requirements: description: - The path to a pip requirements file, which should be local to the remote system. File can be specified as a relative path if using the chdir option. required: false default: null virtualenv: description: - An optional path to a I(virtualenv) directory to install into. It cannot be specified together with the 'executable' parameter (added in 2.1). If the virtualenv does not exist, it will be created before installing packages. The optional virtualenv_site_packages, virtualenv_command, and virtualenv_python options affect the creation of the virtualenv. required: false default: null virtualenv_site_packages: version_added: "1.0" description: - Whether the virtual environment will inherit packages from the global site-packages directory. Note that if this setting is changed on an already existing virtual environment it will not have any effect, the environment must be deleted and newly created. required: false default: "no" choices: [ "yes", "no" ] virtualenv_command: version_added: "1.1" description: - The command or a pathname to the command to create the virtual environment with. For example C(pyvenv), C(virtualenv), C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv). required: false default: virtualenv virtualenv_python: version_added: "2.0" description: - The Python executable used for creating the virtual environment. For example C(python3.5), C(python2.7). When not specified, the Python version used to run the ansible module is used. This parameter should not be used when C(virtualenv_command) is using C(pyvenv) or the C(-m venv) module. required: false default: null state: description: - The state of module - The 'forcereinstall' option is only available in Ansible 2.1 and above. required: false default: present choices: [ "present", "absent", "latest", "forcereinstall" ] extra_args: description: - Extra arguments passed to pip. required: false default: null version_added: "1.0" editable: description: - Pass the editable flag. required: false default: false version_added: "2.0" chdir: description: - cd into this directory before running the command version_added: "1.3" required: false default: null executable: description: - The explicit executable or a pathname to the executable to be used to run pip for a specific version of Python installed in the system. For example C(pip-3.3), if there are both Python 2.7 and 3.3 installations in the system and you want to run pip for the Python 3.3 installation. 
It cannot be specified together with the 'virtualenv' parameter (added in 2.1). By default, it will take the appropriate version for the python interpreter use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2. version_added: "1.3" required: false default: null umask: description: - The system umask to apply before installing the pip package. This is useful, for example, when installing on systems that have a very restrictive umask by default (e.g., 0077) and you want to pip install packages which are to be used by all users. Note that this requires you to specify desired umask mode in octal, with a leading 0 (e.g., 0077). version_added: "2.1" required: false default: null notes: - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be created. - By default, this module will use the appropriate version of pip for the interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise) requirements: [ "virtualenv", "pip" ] author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' # Install (Bottle) python package. - pip: name: bottle # Install (Bottle) python package on version 0.11. - pip: name: bottle version: 0.11 # Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. - pip: name: svn+http://myrepo/svn/MyApp#egg=MyApp # Install MyApp using one of the remote protocols (bzr+,hg+,git+). - pip: name: git+http://myrepo/app/MyApp # Install (MyApp) from local tarball - pip: name: file:///path/to/MyApp.tar.gz # Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules - pip: name: bottle virtualenv: /my_app/venv # Install (Bottle) into the specified (virtualenv), inheriting globally installed modules - pip: name: bottle virtualenv: /my_app/venv virtualenv_site_packages: yes # Install (Bottle) into the specified (virtualenv), using Python 2.7 - pip: name: bottle virtualenv: /my_app/venv virtualenv_command: virtualenv-2.7 # Install (Bottle) within a user home directory. - pip: name: bottle extra_args: --user # Install specified python requirements. - pip: requirements: /my_app/requirements.txt # Install specified python requirements in indicated (virtualenv). - pip: requirements: /my_app/requirements.txt virtualenv: /my_app/venv # Install specified python requirements and custom Index URL. - pip: requirements: /my_app/requirements.txt extra_args: -i https://example.com/pypi/simple # Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable. - pip: name: bottle executable: pip-3.3 # Install (Bottle), forcing reinstallation if it's already installed - pip: name: bottle state: forcereinstall # Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it) - pip: name: bottle umask: 0022 become: True ''' import os import re import sys import tempfile from ansible.module_utils.basic import AnsibleModule, is_executable from ansible.module_utils._text import to_native from ansible.module_utils.six import PY3 #: Python one-liners to be run at the command line that will determine the # installed version for these special libraries. These are libraries that # don't end up in the output of pip freeze. 
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)', 'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'} def _get_cmd_options(module, cmd): thiscmd = cmd + " --help" rc, stdout, stderr = module.run_command(thiscmd) if rc != 0: module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr)) words = stdout.strip().split() cmd_options = [x for x in words if x.startswith('--')] return cmd_options def _get_full_name(name, version=None): if version is None: resp = name else: resp = name + '==' + version return resp def _get_packages(module, pip, chdir): '''Return results of pip command to get packages.''' # Try 'pip list' command first. command = '%s list --format=freeze' % pip lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env) # If there was an error (pip version too old) then use 'pip freeze'. if rc != 0: command = '%s freeze' % pip rc, out, err = module.run_command(command, cwd=chdir) if rc != 0: _fail(module, command, out, err) return (command, out, err) def _is_present(name, version, installed_pkgs, pkg_command): '''Return whether or not package is installed.''' for pkg in installed_pkgs: if '==' in pkg: pkg_name, pkg_version = pkg.split('==') else: continue if pkg_name == name and (version is None or version == pkg_version): return True return False def _get_pip(module, env=None, executable=None): # Older pip only installed under the "/usr/bin/pip" name. Many Linux # distros install it there. # By default, we try to use pip required for the current python # interpreter, so people can use pip to install modules dependencies candidate_pip_basenames = ('pip2', 'pip') if PY3: # pip under python3 installs the "/usr/bin/pip3" name candidate_pip_basenames = ('pip3',) pip = None if executable is not None: if os.path.isabs(executable): pip = executable else: # If you define your own executable that executable should be the only candidate. # As noted in the docs, executable doesn't work with virtualenvs. candidate_pip_basenames = (executable,) if pip is None: if env is None: opt_dirs = [] for basename in candidate_pip_basenames: pip = module.get_bin_path(basename, False, opt_dirs) if pip is not None: break else: # For-else: Means that we did not break out of the loop # (therefore, that pip was not found) module.fail_json(msg='Unable to find any of %s to use. pip' ' needs to be installed.' % ', '.join(candidate_pip_basenames)) else: # If we're using a virtualenv we must use the pip from the # virtualenv venv_dir = os.path.join(env, 'bin') candidate_pip_basenames = (candidate_pip_basenames[0], 'pip') for basename in candidate_pip_basenames: candidate = os.path.join(venv_dir, basename) if os.path.exists(candidate) and is_executable(candidate): pip = candidate break else: # For-else: Means that we did not break out of the loop # (therefore, that pip was not found) module.fail_json(msg='Unable to find pip in the virtualenv,' ' %s, under any of these names: %s. Make sure pip is' ' present in the virtualenv.' % (env, ', '.join(candidate_pip_basenames))) return pip def _fail(module, cmd, out, err): msg = '' if out: msg += "stdout: %s" % (out, ) if err: msg += "\n:stderr: %s" % (err, ) module.fail_json(cmd=cmd, msg=msg) def _get_package_info(module, package, env=None): """This is only needed for special packages which do not show up in pip freeze pip and setuptools fall into this category. 
:returns: a string containing the version number if the package is installed. None if the package is not installed. """ if env: opt_dirs = ['%s/bin' % env] else: opt_dirs = [] python_bin = module.get_bin_path('python', False, opt_dirs) if python_bin is None: formatted_dep = None else: rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]]) if rc: formatted_dep = None else: formatted_dep = '%s==%s' % (package, out.strip()) return formatted_dep def main(): state_map = dict( present='install', absent='uninstall -y', latest='install -U', forcereinstall='install -U --force-reinstall', ) module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=state_map.keys()), name=dict(type='list'), version=dict(type='str'), requirements=dict(type='str'), virtualenv=dict(type='path'), virtualenv_site_packages=dict(default=False, type='bool'), virtualenv_command=dict(default='virtualenv', type='path'), virtualenv_python=dict(type='str'), use_mirrors=dict(default=True, type='bool'), extra_args=dict(type='str'), editable=dict(default=False, type='bool'), chdir=dict(type='path'), executable=dict(type='path'), umask=dict(type='str'), ), required_one_of=[['name', 'requirements']], mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']], supports_check_mode=True ) state = module.params['state'] name = module.params['name'] version = module.params['version'] requirements = module.params['requirements'] extra_args = module.params['extra_args'] virtualenv_python = module.params['virtualenv_python'] chdir = module.params['chdir'] umask = module.params['umask'] if umask and not isinstance(umask, int): try: umask = int(umask, 8) except Exception: module.fail_json(msg="umask must be an octal integer", details=to_native(sys.exc_info()[1])) old_umask = None if umask is not None: old_umask = os.umask(umask) try: if state == 'latest' and version is not None: module.fail_json(msg='version is incompatible with state=latest') if chdir is None: # this is done to avoid permissions issues with privilege escalation and virtualenvs chdir = tempfile.gettempdir() err = '' out = '' env = module.params['virtualenv'] if env: if not os.path.exists(os.path.join(env, 'bin', 'activate')): if module.check_mode: module.exit_json(changed=True) cmd = module.params['virtualenv_command'] if os.path.basename(cmd) == cmd: cmd = module.get_bin_path(cmd, True) if module.params['virtualenv_site_packages']: cmd += ' --system-site-packages' else: cmd_opts = _get_cmd_options(module, cmd) if '--no-site-packages' in cmd_opts: cmd += ' --no-site-packages' # -p is a virtualenv option, not compatible with pyenv or venv # this if validates if the command being used is not any of them if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')): if virtualenv_python: cmd += ' -p%s' % virtualenv_python elif PY3: # Ubuntu currently has a patch making virtualenv always # try to use python2. Since Ubuntu16 works without # python2 installed, this is a problem. This code mimics # the upstream behaviour of using the python which invoked # virtualenv to determine which python is used inside of # the virtualenv (when none are specified). 
cmd += ' -p%s' % sys.executable # if venv or pyvenv are used and virtualenv_python is defined, then # virtualenv_python is ignored, this has to be acknowledged elif module.params['virtualenv_python']: module.fail_json( msg='virtualenv_python should not be used when' ' using the venv module or pyvenv as virtualenv_command' ) cmd = "%s %s" % (cmd, env) rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir) out += out_venv err += err_venv if rc != 0: _fail(module, cmd, out, err) pip = _get_pip(module, env, module.params['executable']) cmd = '%s %s' % (pip, state_map[state]) # If there's a virtualenv we want things we install to be able to use other # installations that exist as binaries within this virtualenv. Example: we # install cython and then gevent -- gevent needs to use the cython binary, # not just a python package that will be found by calling the right python. # So if there's a virtualenv, we add that bin/ to the beginning of the PATH # in run_command by setting path_prefix here. path_prefix = None if env: path_prefix = "/".join(pip.split('/')[:-1]) # Automatically apply -e option to extra_args when source is a VCS url. VCS # includes those beginning with svn+, git+, hg+ or bzr+ has_vcs = False if name: for pkg in name: if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)): has_vcs = True break if module.params['editable']: args_list = [] # used if extra_args is not used at all if extra_args: args_list = extra_args.split(' ') if '-e' not in args_list: args_list.append('-e') # Ok, we will reconstruct the option string extra_args = ' '.join(args_list) if extra_args: cmd += ' %s' % extra_args if name: for pkg in name: cmd += ' %s' % _get_full_name(pkg, version) else: if requirements: cmd += ' -r %s' % requirements if module.check_mode: if extra_args or requirements or state == 'latest' or not name: module.exit_json(changed=True) pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir) out += out_pip err += err_pip changed = False if name: pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p] if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name): # Older versions of pip (pre-1.3) do not have pip list. # pip freeze does not list setuptools or pip in its output # So we need to get those via a specialcase for pkg in ('setuptools', 'pip'): if pkg in name: formatted_dep = _get_package_info(module, pkg, env) if formatted_dep is not None: pkg_list.append(formatted_dep) out += '%s\n' % formatted_dep