cv2.imshow('Preview', self.frame)
self.debugger.display()
else:
cv2.imshow('Detected Card', self.detected_card)
self.handleKey(cv2.waitKey(1) & 0xFF, frame)
if (self.captureDevice is not None):
self.captureDevice.release()
cv2.destroyAllWindows()
def detectCard(self):
"""Detect the card from the active frame
"""
# The phash python bindings operate on files, so we have to write our
# current frame to a file to continue
cv2.imwrite('frame.jpg', self.frame)
# Use phash on our frame
ihash = phash.dct_imagehash('frame.jpg')
idigest = phash.image_digest('frame.jpg')
candidates = {}
hashes = self.referencedb.get_hashes()
for MultiverseID in hashes:
if (MultiverseID in self.blacklist):
continue
hamd = phash.hamming_distance(ihash, int(hashes[MultiverseID]))
if (hamd <= self.threshold):
candidates[MultiverseID] = hamd
if (not len(candidates)):
print('No matches found')
return None
finalists = []
minV = min(candidates.values())
for MultiverseID in candidates:
if (candidates[MultiverseID] == minV):
finalists.append(MultiverseID)
bestMatch = None
correlations = {}
for MultiverseID in finalists:
hamd = candidates[MultiverseID]
digest = phash.image_digest(
self.referencedb.IMAGE_FILE % MultiverseID)
corr = phash.cross_correlation(idigest, digest)
if (bestMatch is None or corr > correlations[bestMatch]):
bestMatch = MultiverseID
correlations[MultiverseID] = corr
return bestMatch
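# Key handling summary (derived from the logic below; 171 appears to be a
# numpad key used to recall the previously detected card):
#   Enter (10)    - apply transforms, then run detection on the next press
#   Backspace/Esc - toggle transforms, or dismiss the current match
#   'd'           - toggle the debugger overlay
#   'n'           - reject the match and try the next-best candidate
#   'y' / Enter   - accept the match and add one copy
#   'p'           - accept the match and add a playset (4 copies)
#   'f'           - accept the match and add a foil copy
#   'q'           - quit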
def handleKey(self, key, frame):
if (self.detected_card is None):
if (key == 8 or key == 27):
self.bApplyTransforms = not self.bApplyTransforms
elif (key == ord('d')):
self.debugger.toggle()
elif (key == 171):
self.detected_id = self.previous_id
if (self.detected_id is not None):
self.detected_card = cv2.imread(
self.referencedb.IMAGE_FILE % self.detected_id,
cv2.IMREAD_UNCHANGED)
elif (key == 10):
if (not self.bApplyTransforms):
self.bApplyTransforms = True
else:
self.detected_id = self.detectCard()
if (self.detected_id is not None):
self.detected_card = cv2.imread(
self.referencedb.IMAGE_FILE % self.detected_id,
cv2.IMREAD_UNCHANGED)
else:
if (key == ord('n')):
cv2.destroyWindow('Detected Card')
self.blacklist.append(self.detected_id)
self.detected_id = self.detectCard()
if (self.detected_id is not None):
self.detected_card = cv2.imread(
self.referencedb.IMAGE_FILE % self.detected_id,
cv2.IMREAD_UNCHANGED)
if (key == ord('p')):
self.blacklist = []
for i in range(0, 4):
self.storagedb.add_card(self.detected_id, 0)
name, code = self.referencedb.get_card_info(self.detected_id)
print('Added 4x ' + name + '[' + code + ']...')
self.previous_id = self.detected_id
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
if (key == 10 or key == ord('y')):
self.blacklist = []
self.storagedb.add_card(self.detected_id, 0)
name, code = self.referencedb.get_card_info(self.detected_id)
print('Added ' + name + '[' + code + ']...')
self.previous_id = self.detected_id
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
if (key == ord('f')):
self.blacklist = []
self.storagedb.add_card(self.detected_id, 1)
name, code = self.referencedb.get_card_info(self.detected_id)
print('Added foil ' + name + '[' + code + ']...')
self.previous_id = self.detected_id
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
elif (key == 8 or key == 27):
self.blacklist = []
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
if (key == ord('q')):
self.running = False
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_ldc2(conf):
conf.find_program(['ldc2'],var='D')
out=conf.cmd_and_log([conf.env.D,'-version'])
if out.find("based on DMD v2.")==-1:
conf.fatal("detected compiler is not ldc2")
@conf
def common_flags_ldc2(conf):
v=conf.env
v['D_SRC_F']=['-c']
v['D_TGT_F']='-of%s'
v['D_LINKER']=v['D']
v['DLNK_SRC_F']=''
v['DLNK_TGT_F']='-of%s'
v['DINC_ST']='-I%s'
v['DSHLIB_MARKER']=v['DSTLIB_MARKER']=''
v['DSTLIB_ST']=v['DSHLIB_ST']='-L-l%s'
v['DSTLIBPATH_ST']=v['DLIBPATH_ST']='-L-L%s'
v['LINKFLAGS_dshlib']=['-L-shared']
v['DHEADER_ext']='.di'
v['DFLAGS_d_with_header']=['-H','-Hf']
v['D_HDR_F']='%s'
v['LINKFLAGS']=[]
v['DFLAGS_dshlib']=['-relocation-model=pic']
def configure(conf):
conf.find_ldc2()
conf.load('ar')
conf.load('d')
conf.common_flags_ldc2()
conf.d_platform_flags()
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see .
'''
import pygame
import ConfigParser
import sys
import os
import urllib2
import urllib
import StringIO
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class PiInfoScreen():
# Set default names
pluginname = "UNDEFINED"
plugininfo = "You should set pluginname and plugininfo in your plugin subclass"
# List of screen sizes supported by the script
supportedsizes = [ (694,466) ]
# Refresh time = how often the data on the screen should be updated (seconds)
refreshtime = 30
# How long screen should be displayed before moving on to next screen (seconds)
# only relevant when the screen is automatically changing screens
# rather than waiting for key press
displaytime = 5
# Read the plugin's config file and dump contents to a dictionary
def readConfig(self):
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
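# Illustration: with autovivification, nested keys are created on first
# access, so
#   cfg = AutoVivification()
#   cfg['colours']['background'] = 'black'
# works without creating cfg['colours'] beforehand.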
self.pluginConfig = AutoVivification()
try:
config = ConfigParser.ConfigParser()
config.read(self.configfile)
for section in config.sections():
for option in config.options(section):
self.pluginConfig[section][option] = config.get(section,option)
except:
pass
self.setPluginVariables()
# Can be overriden to allow plugin to change option type
# Default method is to treat all options as strings
# If option needs different type (bool, int, float) then this should be
# done here
# Alternatively, plugin can just read variables from the pluginConfig
# dictionary that's created
# Any other variables (colours, fonts etc.) should be defined here
def setPluginVariables(self):
pass
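# A hypothetical override might look like this (section and option names are
# invented for the example; ConfigParser always returns strings, so convert
# where a different type is needed):
#   def setPluginVariables(self):
#       self.refreshtime = int(self.pluginConfig["main"]["refresh"])
#       self.showclock = self.pluginConfig["main"]["clock"] == "True"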
# Tells the main script that the plugin is compatible with the requested
# screen size
def supported(self):
return self.supported
# Returns the refresh time
def refreshtime(self):
return self.refreshtime
# Returns the display time
def displaytime(self):
return self.displaytime
# Returns a short description of the script
# displayed when user requests list of installed plugins
def showInfo(self):
return self.plugininfo
# Returns name of the plugin
def screenName(self):
return self.pluginname
# Handle button events
# These should be overriden by screens if required
def Button1Click(self):
pass
def Button2Click(self):
pass
def Button3Click(self):
pass
# Get web page
def getPage(self, url):
user_agent = 'Mozilla/5 (Solaris 10) Gecko'
headers = { 'User-Agent' : user_agent }
request = urllib2.Request(url)
response = urllib2.urlopen(request)
the_page = response.read()
return the_page
# Function to get image and return in format pygame can use
def LoadImageFromUrl(self, url, solid = False):
f = urllib.urlopen(url)
buf = StringIO.StringIO(f.read())
image = self.LoadImage(buf, solid)
return image
def LoadImage(self, fileName, solid = False):
image = pygame.image.load(fileName)
image = image.convert()
if not solid:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
# Draws a progress bar
def showProgress(self, position, barsize,
bordercolour, fillcolour, bgcolour):
try:
if position < 0 : position = 0
if position > 1 : position = 1
except:
position = 0
progress = pygame.Surface(barsize)
pygame.draw.rect(progress,bgcolour,(0,0,barsize[0],barsize[1]))
progresswidth = int(barsize[0] * position)
pygame.draw.rect(progress,fillcolour,(0,0,progresswidth,barsize[1]))
pygame.draw.rect(progress,bordercolour,(0,0,barsize[0],barsize[1]),1)
return progress
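# Illustrative call: a 40% bar, 200x15 pixels, white border, blue fill on a
# black background, blitted onto the plugin surface:
#   bar = self.showProgress(0.4, (200, 15),
#                           (255, 255, 255), (0, 128, 255), (0, 0, 0))
#   self.surface.blit(bar, (10, 10))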
def render_textrect(self, string, font, rect, text_color,
background_color, justification=0, vjustification=0,
margin=0, shrink = False, SysFont=None, FontPath=None,
MaxFont=0, MinFont=0):
"""Returns a surface containing the passed text string, reformatted
to fit within the given rect, word-wrapping as necessary. The text
will be anti-aliased.
Takes the following arguments:
string - the text you wish to render. \n begins a new line.
font - a Font object
rect - a rectstyle giving the size of the surface requested.
text_color - a three-byte tuple of the rgb value of the
text color. ex (0, 0, 0) = BLACK
background_color - a three-byte tuple of the rgb value of the surface.
justification - 0 (default) left-justified
1 horizontally centered
2 right-justified
Returns the following values:
Success - a surface object with the text rendered onto it.
Failure - raises a TextRectException if the text won't fit onto the
surface.
"""
""" Amended by el_Paraguayo:
- cutoff=True - cuts off text instead of raising error
- margin=(left,right,top,bottom) or
- margin=2 is equal to margin = (2,2,2,2)
- shrink=True adds variable font size to fit text
- Has additional args:
- SysFont=None - set SysFont to use when shrinking
- FontPath=None - set custom font path to use when shrinking
MaxFont=0 (max font size)
MinFont=0 (min font size)
- vjustification=0 adds vertical justification
0 = Top
1 = Middle
2 = Bottom
"""
class TextRectException(Exception):
def __init__(self, message = None):
self.message = message
def __str__(self):
return self.message
def draw_text_rect(string, font, rect, text_color, background_color,
justification=0, vjustification=0, margin=0,
cutoff=True):
final_lines = []
requested_lines = string.splitlines()
# Create a series of lines that will fit on the provided
# rectangle.
for requested_line in requested_lines:
if font.size(requested_line)[0] > (rect.width - (margin[0] + margin[1])):
words = requested_line.split(' ')
# if any of our words are too long to fit, return.
# for word in words:
# if font.size(word)[0] >= (rect.width - (margin * 2)):
# raise TextRectException, "The word " + word + "
# is too long to fit in the rect passed."
# Start a new line
accumulated_line = ""
for word in words:
test_line = accumulated_line + word + " "
# Build the line while the words fit.
if font.size(test_line.strip())[0] < (rect.width - (margin[0] + margin[1])) :
accumulated_line = test_line
else:
final_lines.append(accumulated_line)
accumulated_line = word + " "
final_lines.append(accumulated_line)
else:
final_lines.append(requested_line)
# Let's try to write the text out on the surface.
surface = pygame.Surface(rect.size)
surface.fill(background_color)
accumulated_height = 0
for line in final_lines:
if accumulated_height + font.size(line)[1] >= (rect.height - margin[2] - margin[3]):
if not cutoff:
raise TextRectException, "Once word-wrapped, the text string was too tall to fit in the rect."
else:
break
if line != "":
tempsurface = font.render(line.strip(), 1, text_color)
if justification == 0:
surface.blit(tempsurface, (0 + margin[0], accumulated_height + margin[2]))
elif justification == 1:
surface.blit(tempsurface, ((rect.width - tempsurface.get_width()) / 2, accumulated_height + margin[2]))
elif justification == 2:
surface.blit(tempsurface, (rect.width - tempsurface.get_width() - margin[1], accumulated_height + margin[2]))
else:
raise TextRectException, "Invalid justification argument: " + str(justification)
accumulated_height += font.size(line)[1]
if vjustification == 0:
# Top aligned, we're ok
pass
elif vjustification == 1:
# Middle aligned
tempsurface = pygame.Surface(rect.size)
tempsurface.fill(background_color)
vpos = (0, (rect.size[1] - accumulated_height)/2)
tempsurface.blit(surface, vpos, (0,0,rect.size[0],accumulated_height))
surface = tempsurface
elif vjustification == 2:
# Bottom aligned
tempsurface = pygame.Surface(rect.size)
tempsurface.fill(background_color)
vpos = (0, (rect.size[1] - accumulated_height - margin[3]))
tempsurface.blit(surface, vpos, (0,0,rect.size[0],accumulated_height))
surface = tempsurface
else:
raise TextRectException, "Invalid vjustification argument: " + str(justification)
return surface
surface = None
if type(margin) is tuple:
if not len(margin) == 4:
try:
margin = (int(margin), int(margin), int(margin), int(margin))
except:
margin = (0,0,0,0)
elif type(margin) is int:
margin = (margin, margin, margin, margin)
else:
margin = (0,0,0,0)
if not shrink:
surface = draw_text_rect(string, font, rect, text_color, background_color,
justification=justification, vjustification=vjustification,
margin=margin, cutoff=False)
else:
fontsize = MaxFont
fit = False
while fontsize >= MinFont:
if FontPath is None:
myfont = pygame.font.SysFont(SysFont,fontsize)
else:
myfont = pygame.font.Font(FontPath,fontsize)
try:
surface = draw_text_rect(string, myfont, rect,text_color, background_color,
justification=justification, vjustification=vjustification,
margin=margin, cutoff=False)
fit = True
break
except:
fontsize -= 1
if not fit:
surface = draw_text_rect(string, myfont, rect, text_color, background_color,
justification=justification, vjustification=vjustification,
margin=margin)
return surface
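# Illustrative call: wrap two lines of text into a 300x100 box, centred
# horizontally and vertically, with a 2px margin:
#   font = pygame.font.SysFont("freesans", 20)
#   box = pygame.Rect(0, 0, 300, 100)
#   rendered = self.render_textrect("Hello\nworld", font, box,
#                                   (255, 255, 255), (0, 0, 0),
#                                   justification=1, vjustification=1,
#                                   margin=2)
#   self.surface.blit(rendered, box.topleft)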
# Main function - returns screen to main script
# Will be overriden by plugins
# Defaults to showing name and description of plugin
def showScreen(self):
self.screen.fill([0,0,0])
screentext = pygame.font.SysFont("freesans",20).render("%s: %s." % (self.pluginname, self.plugininfo),1,(255,255,255))
screenrect = screentext.get_rect()
screenrect.centerx = self.screen.get_rect().centerx
screenrect.centery = self.screen.get_rect().centery
self.screen.blit(screentext,screenrect)
return self.screen
def setUpdateTimer(self):
pygame.time.set_timer(self.userevents["update"], 0)
pygame.time.set_timer(self.userevents["update"], int(self.refreshtime * 1000))
# This function should not be overriden
def __init__(self, screensize, scale=True, userevents=None):
# Set config filepath...
self.plugindir=os.path.dirname(sys.modules[self.__class__.__module__].__file__)
self.configfile = os.path.join(self.plugindir, "config", "screen.ini")
# ...and read the config file
self.readConfig()
# Save the requested screen size
self.screensize = screensize
self.userevents = userevents
# Check requested screen size is compatible and set supported property
if screensize not in self.supportedsizes:
self.supported = False
else:
self.supported = True
# Initialise pygame for the class
if self.supported or scale:
pygame.init()
self.screen = pygame.display.set_mode(self.screensize)
self.surfacesize = self.supportedsizes[0]
self.surface = pygame.Surface(self.surfacesize)
from dependencies.dependency import schemata
from dependencies import atapi
from dependencies.dependency import getToolByName
from lims.browser.bika_listing import BikaListingView
from lims.config import PROJECTNAME
from lims import bikaMessageFactory as _
from lims.utils import t
from dependencies.dependency import IFolderContentsView
from dependencies.folder import ATFolder, ATFolderSchema
from dependencies.dependency import IViewView
from lims.interfaces import ISRTemplates
from dependencies.dependency import implements
class TemplatesView(BikaListingView):
implements(IFolderContentsView, IViewView)
def __init__(self, context, request):
super(TemplatesView, self).__init__(context, request)
self.catalog = "bika_setup_catalog"
self.contentFilter = {
'portal_type': 'SRTemplate',
'sort_order': 'sortable_title',
'path': {
"query": "/".join(self.context.getPhysicalPath()),
"level": 0
},
}
self.show_sort_column = False
self.show_select_row = False
self.show_select_column = True
self.icon = self.portal_url + "/++resource++bika.lims.images/artemplate_big.png"
self.title = self.context.translate(_("SR Templates"))
self.description = ""
self.context_actions = {
_('Add Template'): {
'url': 'createObject?type_name=SRTemplate',
'icon': '++resource++bika.lims.images/add.png'
}
}
self.columns = {
'Title': {
'title': _('Template'),
'index': 'sortable_title'
},
'Description': {
'title': _('Description'),
'index': 'description'
},
}
self.review_states = [{
'id':'default',
'title': _('Default'),
'contentFilter': {},
'columns': ['Title', 'Description']
}]
def folderitems(self):
items = BikaListingView.folderitems(self)
for item in items:
if not item.has_key('obj'): continue
obj = item['obj']
title_link = "<a href='%s'>%s</a>" % (item['url'], item['title'])
item['replace']['Title'] = title_link
return items
schema = ATFolderSchema.copy()
class SRTemplates(ATFolder):
implements(ISRTemplates)
displayContentsTab = False
schema = schema
schemata.finalizeATCTSchema(schema, folderish = True, moveDiscussion = False)
atapi.registerType(SRTemplates, PROJECTNAME)
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see .
"""abydos.distance._baulieu_ii.
Baulieu II similarity
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['BaulieuII']
class BaulieuII(_TokenDistance):
r"""Baulieu II similarity.
For two sets X and Y and a population N, Baulieu II similarity
:cite:`Baulieu:1989` is
.. math::
sim_{BaulieuII}(X, Y) =
\frac{|X \cap Y|^2 \cdot |(N \setminus X) \setminus Y|^2}
{|X| \cdot |Y| \cdot |N \setminus X| \cdot |N \setminus Y|}
This is based on Baulieu's 13th dissimilarity coefficient.
In :ref:`2x2 confusion table terms `, where a+b+c+d=n,
this is
.. math::
sim_{BaulieuII} =
\frac{a^2d^2}{(a+b)(a+c)(b+d)(c+d)}
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize BaulieuII instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet ` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type ` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(BaulieuII, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
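# In sim() below the 2x2 confusion-table cells correspond to set
# cardinalities as follows: a = |X ∩ Y| (_intersection_card), b = |X \ Y|
# (_src_only_card), c = |Y \ X| (_tar_only_card) and d = |N \ (X ∪ Y)|
# (_total_complement_card).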
def sim(self, src: str, tar: str) -> float:
"""Return the Baulieu II similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Baulieu II similarity
Examples
--------
>>> cmp = BaulieuII()
>>> cmp.sim('cat', 'hat')
0.24871959237343852
>>> cmp.sim('Niall', 'Neil')
0.13213719608444902
>>> cmp.sim('aluminum', 'Catalan')
0.013621892326789235
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
d = self._total_complement_card()
num = a * a * d * d
if num == 0:
return 0.0
return num / ((a + b) * (a + c) * (b + d) * (c + d))
if __name__ == '__main__':
import doctest
doctest.testmod()
#
# Copyright 2009 Huang Ying
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import sys
import pdf
import pdfinfo
import imb
import imbinfo
import djvu
import djvuinfo
def get_input_info(config):
iformat = config.input_format
if iformat == 'pdf':
info_parser = pdfinfo.PDFInfoParser(config)
elif iformat == 'imb':
info_parser = imbinfo.ImbInfoParser(config)
elif iformat == 'djvu':
info_parser = djvuinfo.DJVUInfoParser(config)
else:
print 'Invalid input format: %s' % (iformat)
sys.exit(-1)
return info_parser.parse()
def create_input_to_ppm(config):
if config.input_format == 'pdf':
return pdf.create_pdf_to_ppm(config)
elif config.input_format == 'imb':
return imb.IMBToPPM(config)
elif config.input_format == 'djvu':
return djvu.DJVUToPPM(config)
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see .
import sys
from completers import NodeCompleter
class CmdSshNode(NodeCompleter):
"""
sshnode <cluster> <node>
SSH to a cluster node
Examples:
$ starcluster sshnode mycluster master
$ starcluster sshnode mycluster node001
...
or same thing in shorthand:
$ starcluster sshnode mycluster 0
$ starcluster sshnode mycluster 1
...
You can also execute commands without directly logging in:
$ starcluster sshnode mycluster node001 'cat /etc/hosts'
"""
names = ['sshnode', 'sn']
def addopts(self, parser):
parser.add_option("-u", "--user", dest="user", action="store",
type="string", default='root',
help="login as USER (defaults to root)")
parser.add_option("-X", "--forward-x11", dest="forward_x11",
action="store_true", default=False,
help="enable X11 forwarding ")
parser.add_option("-A", "--forward-agent", dest="forward_agent",
action="store_true", default=False,
help="enable authentication agent forwarding")
parser.add_option("-t", "--pseudo-tty", dest="pseudo_tty",
action="store_true", default=False,
help="enable pseudo-tty allocation (for interactive "
"commands and screens)")
def execute(self, args):
if len(args) < 2:
self.parser.error(
"please specify a cluster and node to connect to")
scluster = args[0]
node = args[1]
cmd = ' '.join(args[2:])
retval = self.cm.ssh_to_cluster_node(
scluster, node, user=self.opts.user, command=cmd,
forward_x11=self.opts.forward_x11, pseudo_tty=self.opts.pseudo_tty,
forward_agent=self.opts.forward_agent)
if cmd and retval is not None:
sys.exit(retval)
# (c) 2012-2014, Michael DeHaan
# Copyright 2012, Seth Vidal
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Create inventory hosts and groups in the memory inventory'''
### We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
TRANSFERS_FILES = False
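# Illustrative task (argument names as read in run() below):
#
#   - add_host:
#       name: newhost.example.com:2222
#       groups: web,db
#       foo: bar
#
# The ":2222" suffix becomes ansible_ssh_port, "groups" is split into the
# group list, and remaining arguments such as "foo" become host variables.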
def run(self, tmp=None, task_vars=dict()):
# FIXME: is this necessary in v2?
#if self.runner.noop_on_check(inject):
# return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
# Parse out any hostname:port patterns
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
#vv("creating host via 'add_host': hostname=%s" % new_name)
if ":" in new_name:
new_name, new_port = new_name.split(":")
self._task.args['ansible_ssh_port'] = new_port
groups = self._task.args.get('groupname', self._task.args.get('groups', self._task.args.get('group', '')))
# add it to the group if that was specified
new_groups = []
if groups:
for group_name in groups.split(","):
if group_name not in new_groups:
new_groups.append(group_name.strip())
# Add any variables to the new_host
host_vars = dict()
for k in self._task.args.keys():
if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
host_vars[k] = self._task.args[k]
return dict(changed=True, add_host=dict(host_name=new_name, groups=new_groups, host_vars=host_vars))
# -*- coding: utf-8 -*-
"""
***************************************************************************
SetZValue.py
--------------
Date : July 2017
Copyright : (C) 2017 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'July 2017'
__copyright__ = '(C) 2017, Nyall Dawson'
import os
from qgis.core import (QgsGeometry,
QgsWkbTypes,
QgsPropertyDefinition,
QgsProcessingParameters,
QgsProcessingParameterNumber,
QgsProcessingFeatureSource)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class SetZValue(QgisFeatureBasedAlgorithm):
Z_VALUE = 'Z_VALUE'
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
self.z_value = 0
self.dynamic_z = False
self.z_property = None
def name(self):
return 'setzvalue'
def displayName(self):
return self.tr('Set Z value')
def outputName(self):
return self.tr('Z Added')
def tags(self):
return self.tr('set,add,z,25d,3d,values').split(',')
def initParameters(self, config=None):
z_param = QgsProcessingParameterNumber(self.Z_VALUE,
self.tr('Z Value'), QgsProcessingParameterNumber.Double, defaultValue=0.0)
z_param.setIsDynamic(True)
z_param.setDynamicLayerParameterName('INPUT')
z_param.setDynamicPropertyDefinition(QgsPropertyDefinition(self.Z_VALUE, self.tr("Z Value"), QgsPropertyDefinition.Double))
self.addParameter(z_param)
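# Because the parameter is marked dynamic, callers may supply either a fixed
# number or a data-defined property; prepareAlgorithm()/processFeature()
# below evaluate the property against each feature's expression context.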
def outputWkbType(self, inputWkb):
return QgsWkbTypes.addZ(inputWkb)
def sourceFlags(self):
return QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks
def prepareAlgorithm(self, parameters, context, feedback):
self.z_value = self.parameterAsDouble(parameters, self.Z_VALUE, context)
self.dynamic_z = QgsProcessingParameters.isDynamic(parameters, self.Z_VALUE)
if self.dynamic_z:
self.z_property = parameters[self.Z_VALUE]
return True
def processFeature(self, feature, context, feedback):
input_geometry = feature.geometry()
if input_geometry:
new_geom = input_geometry.constGet().clone()
if QgsWkbTypes.hasZ(new_geom.wkbType()):
# addZValue won't alter existing Z values, so drop them first
new_geom.dropZValue()
z = self.z_value
if self.dynamic_z:
z, ok = self.z_property.valueAsDouble(context.expressionContext(), z)
new_geom.addZValue(z)
feature.setGeometry(QgsGeometry(new_geom))
return [feature]
def supportInPlaceEdit(self, layer):
return super().supportInPlaceEdit(layer) and QgsWkbTypes.hasZ(layer.wkbType())
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a GCE Virtual Machine object.
Zones:
run 'gcutil listzones'
Machine Types:
run 'gcutil listmachinetypes'
Images:
run 'gcutil listimages'
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import json
import re
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import package_managers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.gcp import gce_disk
from perfkitbenchmarker.gcp import util
flags.DEFINE_integer('gce_num_local_ssds', 0,
'The number of ssds that should be added to the VM. Note '
'that this is currently only supported in certain zones '
'(see https://cloud.google.com/compute/docs/local-ssd).')
flags.DEFINE_string('gcloud_scopes', None, 'If set, space-separated list of '
'scopes to apply to every created machine')
FLAGS = flags.FLAGS
SET_INTERRUPTS_SH = 'set-interrupts.sh'
BOOT_DISK_SIZE_GB = 10
BOOT_DISK_TYPE = disk.STANDARD
NVME = 'nvme'
SCSI = 'SCSI'
class GceVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a Google Compute Engine Virtual Machine."""
def __init__(self, vm_spec):
"""Initialize a GCE virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(GceVirtualMachine, self).__init__(vm_spec)
disk_spec = disk.BaseDiskSpec(BOOT_DISK_SIZE_GB, BOOT_DISK_TYPE, None)
self.boot_disk = gce_disk.GceDisk(
disk_spec, self.name, self.zone, self.project, self.image)
self.max_local_disks = FLAGS.gce_num_local_ssds
self.local_disk_counter = 0
def _CreateDependencies(self):
"""Create VM dependencies."""
self.boot_disk.Create()
def _DeleteDependencies(self):
"""Delete VM dependencies."""
self.boot_disk.Delete()
def _Create(self):
"""Create a GCE VM instance."""
super(GceVirtualMachine, self)._Create()
with open(self.ssh_public_key) as f:
public_key = f.read().rstrip('\n')
with vm_util.NamedTemporaryFile(dir=vm_util.GetTempDir(),
prefix='key-metadata') as tf:
tf.write('%s:%s\n' % (self.user_name, public_key))
tf.close()
create_cmd = [FLAGS.gcloud_path,
'compute',
'instances',
'create', self.name,
'--disk',
'name=%s' % self.boot_disk.name,
'boot=yes',
'mode=rw',
'--machine-type', self.machine_type,
'--tags=perfkitbenchmarker',
'--maintenance-policy', 'TERMINATE',
'--no-restart-on-failure',
'--metadata-from-file',
'sshKeys=%s' % tf.name,
'--metadata',
'owner=%s' % FLAGS.owner]
ssd_interface_option = NVME if NVME in self.image else SCSI
for _ in range(self.max_local_disks):
create_cmd.append('--local-ssd')
create_cmd.append('interface=%s' % ssd_interface_option)
if FLAGS.gcloud_scopes:
create_cmd.extend(['--scopes'] +
re.split(r'[,; ]', FLAGS.gcloud_scopes))
create_cmd.extend(util.GetDefaultGcloudFlags(self))
vm_util.IssueCommand(create_cmd)
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data."""
getinstance_cmd = [FLAGS.gcloud_path,
'compute',
'instances',
'describe', self.name]
getinstance_cmd.extend(util.GetDefaultGcloudFlags(self))
stdout, _, _ = vm_util.IssueCommand(getinstance_cmd)
response = json.loads(stdout)
network_interface = response['networkInterfaces'][0]
self.internal_ip = network_interface['networkIP']
self.ip_address = network_interface['accessConfigs'][0]['natIP']
def _Delete(self):
"""Delete a GCE VM instance."""
delete_cmd = [FLAGS.gcloud_path,
'compute',
'instances',
'delete', self.name]
delete_cmd.extend(util.GetDefaultGcloudFlags(self))
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
getinstance_cmd = [FLAGS.gcloud_path,
'compute',
'instances',
'describe', self.name]
getinstance_cmd.extend(util.GetDefaultGcloudFlags(self))
stdout, _, _ = vm_util.IssueCommand(getinstance_cmd, suppress_warning=True)
try:
json.loads(stdout)
except ValueError:
return False
return True
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
# Get the names for the disk(s) we are going to create.
if disk_spec.disk_type == disk.LOCAL:
new_count = self.local_disk_counter + disk_spec.num_striped_disks
if new_count > self.max_local_disks:
raise errors.Error('Not enough local disks.')
disk_names = ['local-ssd-%d' % i
for i in range(self.local_disk_counter, new_count)]
self.local_disk_counter = new_count
else:
disk_names = ['%s-data-%d-%d' % (self.name, len(self.scratch_disks), i)
for i in range(disk_spec.num_striped_disks)]
# Instantiate the disk(s).
disks = [gce_disk.GceDisk(disk_spec, name, self.zone, self.project)
for name in disk_names]
self._CreateScratchDiskFromDisks(disk_spec, disks)
def GetName(self):
"""Get a GCE VM's unique name."""
return self.name
def GetLocalDisks(self):
"""Returns a list of local disks on the VM.
Returns:
A list of strings, where each string is the absolute path to the local
disks on the VM (e.g. '/dev/sdb').
"""
return ['/dev/disk/by-id/google-local-ssd-%d' % i
for i in range(self.max_local_disks)]
def SetupLocalDisks(self):
"""Performs GCE specific local SSD setup (runs set-interrupts.sh)."""
self.PushDataFile(SET_INTERRUPTS_SH)
self.RemoteCommand('chmod +rx set-interrupts.sh; sudo ./set-interrupts.sh')
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM via 'gcloud compute instances add-metadata'."""
if not kwargs:
return
cmd = [FLAGS.gcloud_path, 'compute', 'instances', 'add-metadata',
self.name, '--metadata']
for key, value in kwargs.iteritems():
cmd.append('{0}={1}'.format(key, value))
cmd.extend(util.GetDefaultGcloudFlags(self))
vm_util.IssueCommand(cmd)
class DebianBasedGceVirtualMachine(GceVirtualMachine,
package_managers.AptMixin):
pass
class RhelBasedGceVirtualMachine(GceVirtualMachine,
package_managers.YumMixin):
pass
#!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil -*-
# vi: set ts=4 sw=4 expandtab:
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2005-2006
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# This script runs just like a traditional configure script, to do configuration
# testing and makefile generation.
import os.path
import sys
thisdir = os.path.dirname(os.path.abspath(__file__))
# Look for additional modules in our build/ directory.
sys.path.append(thisdir)
from build.configuration import *
import build.getopt
o = build.getopt.Options()
config = Configuration(thisdir, options = o,
sourcefile = 'core/avmplus.h')
buildTamarin = o.getBoolArg('tamarin', True)
if buildTamarin:
config.subst("ENABLE_TAMARIN", 1)
buildShell = o.getBoolArg("shell", False)
if (buildShell):
config.subst("ENABLE_SHELL", 1)
buildThane = o.getBoolArg("thane", False)
if (buildThane):
config.subst("ENABLE_THANE", 1)
APP_CPPFLAGS = ""
APP_CXXFLAGS = ""
OPT_CXXFLAGS = "-O3 "
OPT_CPPFLAGS = ""
DEBUG_CPPFLAGS = "-DDEBUG -D_DEBUG "
DEBUG_CXXFLAGS = ""
DEBUG_LDFLAGS = ""
OS_LIBS = []
OS_LDFLAGS = ""
MMGC_CPPFLAGS = ""
AVMSHELL_CPPFLAGS = ""
AVMSHELL_LDFLAGS = ""
MMGC_DEFINES = {'SOFT_ASSERTS': None}
NSPR_INCLUDES = ""
NSPR_LDOPTS = ""
selfTest = o.getBoolArg("selftests", False)
if selfTest:
APP_CPPFLAGS += "-DAVMPLUS_SELFTEST "
memoryProfiler = o.getBoolArg("memory-profiler", False)
if memoryProfiler:
APP_CPPFLAGS += "-DMMGC_MEMORY_PROFILER "
MMGC_INTERIOR_PTRS = o.getBoolArg('mmgc-interior-pointers', False)
if MMGC_INTERIOR_PTRS:
MMGC_DEFINES['MMGC_INTERIOR_PTRS'] = None
MMGC_DYNAMIC = o.getBoolArg('mmgc-shared', False)
if MMGC_DYNAMIC:
MMGC_DEFINES['MMGC_DLL'] = None
MMGC_CPPFLAGS += "-DMMGC_IMPL "
MMGC_THREADSAFE = o.getBoolArg('threadsafe-mmgc', False)
if MMGC_THREADSAFE:
MMGC_DEFINES['MMGC_THREADSAFE'] = None
NSPR_INCLUDES = o.getStringArg('nspr-includes')
MMGC_CPPFLAGS += NSPR_INCLUDES + " "
APP_CPPFLAGS += NSPR_INCLUDES + " "
NSPR_LDOPTS = o.getStringArg('nspr-ldopts')
OS_LDFLAGS += " " + NSPR_LDOPTS
os, cpu = config.getTarget()
if config.getCompiler() == 'GCC':
APP_CXXFLAGS = "-fstrict-aliasing -Wextra -Wall -Wno-reorder -Wno-switch -Wno-invalid-offsetof -Wsign-compare -Wunused-parameter -fmessage-length=0 -fno-rtti -fno-exceptions "
if config.getDebug():
APP_CXXFLAGS += ""
else:
APP_CXXFLAGS += "-Wuninitialized "
DEBUG_CXXFLAGS += "-g "
elif config.getCompiler() == 'VS':
if cpu == "arm":
APP_CXXFLAGS = "-W4 -WX -wd4291 -wd4201 -wd4189 -wd4740 -wd4127 -fp:fast -GF -GS- -Zc:wchar_t- "
OS_LDFLAGS += "-MAP "
if config.getDebug():
DEBUG_CXXFLAGS = "-Od "
APP_CXXFLAGS += "-GR- -fp:fast -GS- -Zc:wchar_t- -Zc:forScope "
else:
OPT_CXXFLAGS = "-O2 -GR- "
else:
APP_CXXFLAGS = "-W4 -WX -wd4291 -GF -fp:fast -GS- -Zc:wchar_t- "
OS_LDFLAGS += "-SAFESEH:NO -MAP "
if config.getDebug():
DEBUG_CXXFLAGS = "-Od -EHsc "
else:
OPT_CXXFLAGS = "-O2 -Ob1 -GR- "
if memoryProfiler:
OPT_CXXFLAGS += "-Oy- -Zi "
DEBUG_CXXFLAGS += "-Zi "
DEBUG_LDFLAGS += "-DEBUG "
elif config.getCompiler() == 'SunStudio':
OPT_CXXFLAGS = "-xO5 "
DEBUG_CXXFLAGS += "-g "
else:
raise Exception('Unrecognized compiler: ' + config.getCompiler())
zlib_include_dir = o.getStringArg('zlib-include-dir')
if zlib_include_dir is not None:
AVMSHELL_CPPFLAGS += "-I%s " % zlib_include_dir
zlib_lib = o.getStringArg('zlib-lib')
if zlib_lib is not None:
AVMSHELL_LDFLAGS = zlib_lib
else:
AVMSHELL_LDFLAGS = '$(call EXPAND_LIBNAME,z)'
if os == "darwin":
AVMSHELL_LDFLAGS += " -exported_symbols_list " + thisdir + "/platform/mac/shell/exports.exp"
MMGC_DEFINES.update({'TARGET_API_MAC_CARBON': 1,
'DARWIN': 1,
'_MAC': None,
'AVMPLUS_MAC': None,
'TARGET_RT_MAC_MACHO': 1})
APP_CXXFLAGS += "-fpascal-strings -faltivec -fasm-blocks "
if cpu == 'x86_64' or cpu == 'ppc64' or o.getBoolArg("leopard"):
# use --enable-leopard to build for 10.5 or later; this is mainly useful for enabling
# us to build with gcc4.2 (which requires the 10.5 sdk), since it has a slightly different
# set of error & warning sensitivities. Note that we don't override CC/CXX here, the calling script
# is expected to do that if desired (thus we can support 10.5sdk with either 4.0 or 4.2)
APP_CXXFLAGS += "-mmacosx-version-min=10.5 -isysroot /Developer/SDKs/MacOSX10.5.sdk "
config.subst("MACOSX_DEPLOYMENT_TARGET",10.5)
else:
APP_CXXFLAGS += "-mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk "
config.subst("MACOSX_DEPLOYMENT_TARGET",10.4)
elif os == "freebsd":
MMGC_DEFINES.update({
'LINUX' :None,
'HAVE_PTHREAD_NP_H' :None,
'UNIX': None,
'AVMPLUS_UNIX' :None })
OS_LIBS.append('pthread')
APP_CPPFLAGS += '-DAVMPLUS_CDECL '
elif os == "windows" or os == "cygwin":
MMGC_DEFINES.update({'WIN32': None,
'_CRT_SECURE_NO_DEPRECATE': None})
OS_LDFLAGS += "-MAP "
if cpu == "arm":
APP_CPPFLAGS += "-DARM -D_ARM_ -DARMV5 -DUNICODE -DUNDER_CE=1 -DMMGC_ARM -QRarch5t "
OS_LIBS.append('mmtimer corelibc coredll')
else:
APP_CPPFLAGS += "-DWIN32_LEAN_AND_MEAN -D_CONSOLE "
OS_LIBS.append('winmm')
OS_LIBS.append('shlwapi')
elif os == "linux":
MMGC_DEFINES.update({'UNIX': None,
'AVMPLUS_UNIX': None,
'LINUX': None})
OS_LIBS.append('pthread')
APP_CPPFLAGS += '-DAVMPLUS_CDECL '
if cpu == "x86_64":
# workaround https://bugzilla.mozilla.org/show_bug.cgi?id=467776
OPT_CXXFLAGS += '-fno-schedule-insns2 '
# these warnings are too noisy
APP_CXXFLAGS += ' -Wno-parentheses '
if config.getDebug():
OS_LIBS.append("dl")
elif os == "sunos":
if config.getCompiler() != 'GCC':
APP_CXXFLAGS = ""
OPT_CXXFLAGS = "-xO5 "
DEBUG_CXXFLAGS = "-g "
MMGC_DEFINES.update({'UNIX': None,
'AVMPLUS_UNIX': None,
'SOLARIS': None})
OS_LIBS.append('pthread')
APP_CPPFLAGS += '-DAVMPLUS_CDECL '
if config.getDebug():
OS_LIBS.append("dl")
else:
raise Exception("Unsupported OS")
if cpu == "i686":
if config.getCompiler() == 'GCC' and os == 'darwin':
#only mactel always has sse2
APP_CPPFLAGS += "-msse2 "
elif cpu == "powerpc":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
elif cpu == "ppc64":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
elif cpu == "sparc":
APP_CPPFLAGS += "-DAVMPLUS_SPARC "
elif cpu == "x86_64":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
elif cpu == "arm":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
else:
raise Exception("Unsupported CPU")
if o.getBoolArg("selftests"):
APP_CPPFLAGS += "-DAVMPLUS_SELFTEST "
if o.getBoolArg("debugger"):
APP_CPPFLAGS += "-DDEBUGGER "
if o.getBoolArg('perfm'):
APP_CPPFLAGS += "-DPERFM "
if o.getBoolArg('disable-nj'):
APP_CPPFLAGS += '-DAVMPLUS_DISABLE_NJ '
if o.getBoolArg('abc-interp'):
APP_CPPFLAGS += '-DAVMPLUS_ABC_INTERPRETER '
if o.getBoolArg('selftest'):
APP_CPPFLAGS += '-DAVMPLUS_SELFTEST '
# We do two things with MMGC_DEFINES: we append it to APP_CPPFLAGS and we also write MMgc-config.h
APP_CPPFLAGS += ''.join(val is None and ('-D%s ' % var) or ('-D%s=%s ' % (var, val))
for (var, val) in MMGC_DEFINES.iteritems())
definePattern = \
"""#ifndef %(var)s
#define %(var)s %(val)s
#endif
"""
outpath = "%s/MMgc-config.h" % config.getObjDir()
contents = ''.join(definePattern % {'var': var,
'val': val is not None and val or ''}
for (var, val) in MMGC_DEFINES.iteritems())
writeFileIfChanged(outpath, contents)
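# For illustration, with MMGC_DEFINES = {'SOFT_ASSERTS': None, 'MMGC_DLL': None}
# the join above adds "-DSOFT_ASSERTS -DMMGC_DLL " to APP_CPPFLAGS and the
# generated MMgc-config.h contains (in some order):
#   #ifndef SOFT_ASSERTS
#   #define SOFT_ASSERTS
#   #endif
#   #ifndef MMGC_DLL
#   #define MMGC_DLL
#   #endif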
config.subst("APP_CPPFLAGS", APP_CPPFLAGS)
config.subst("APP_CXXFLAGS", APP_CXXFLAGS)
config.subst("OPT_CPPFLAGS", OPT_CPPFLAGS)
config.subst("OPT_CXXFLAGS", OPT_CXXFLAGS)
config.subst("DEBUG_CPPFLAGS", DEBUG_CPPFLAGS)
config.subst("DEBUG_CXXFLAGS", DEBUG_CXXFLAGS)
config.subst("DEBUG_LDFLAGS", DEBUG_LDFLAGS)
config.subst("OS_LIBS", " ".join(OS_LIBS))
config.subst("OS_LDFLAGS", OS_LDFLAGS)
config.subst("MMGC_CPPFLAGS", MMGC_CPPFLAGS)
config.subst("AVMSHELL_CPPFLAGS", AVMSHELL_CPPFLAGS)
config.subst("AVMSHELL_LDFLAGS", AVMSHELL_LDFLAGS)
config.subst("MMGC_DYNAMIC", MMGC_DYNAMIC and 1 or '')
config.generate("Makefile")
o.finish()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""
EXAMPLES = """
# Deploy a hello world application
- jboss:
src: /tmp/hello-1.0-SNAPSHOT.war
deployment: hello.war
state: present
# Update the hello world application
- jboss:
src: /tmp/hello-1.1-SNAPSHOT.war
deployment: hello.war
state: present
# Undeploy the hello world application
- jboss:
deployment: hello.war
state: absent
"""
import os
import shutil
import time
from ansible.module_utils.basic import AnsibleModule
def is_deployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
def is_undeployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
def is_failed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
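# The standalone deployment scanner communicates through marker files next to
# the artefact: "<name>.deployed" once it is running, "<name>.failed" on
# error, "<name>.undeployed" after removal. The helpers above simply poll for
# those markers.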
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path'),
deployment=dict(required=True),
deploy_path=dict(type='path', default='/var/lib/jbossas/standalone/deployments'),
state=dict(choices=['absent', 'present'], default='present'),
),
required_if=[('state', 'present', ('src',))]
)
result = dict(changed=False)
src = module.params['src']
deployment = module.params['deployment']
deploy_path = module.params['deploy_path']
state = module.params['state']
if not os.path.exists(deploy_path):
module.fail_json(msg="deploy_path does not exist.")
deployed = is_deployed(deploy_path, deployment)
if state == 'present' and not deployed:
if not os.path.exists(src):
module.fail_json(msg='Source file %s does not exist.' % src)
if is_failed(deploy_path, deployment):
# Clean up old failed deployment
os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'present' and deployed:
if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
deployed = False
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'absent' and deployed:
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
while deployed:
deployed = not is_undeployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Undeploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
"""Test the Advantage Air Switch Platform."""
from json import loads
from homeassistant.components.advantage_air.const import (
ADVANTAGE_AIR_STATE_OFF,
ADVANTAGE_AIR_STATE_ON,
)
from homeassistant.components.switch import (
DOMAIN as SWITCH_DOMAIN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF
from homeassistant.helpers import entity_registry as er
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_switch_async_setup_entry(hass, aioclient_mock):
"""Test switch platform setup and services."""
aioclient_mock.get(
TEST_SYSTEM_URL,
text=TEST_SYSTEM_DATA,
)
aioclient_mock.get(
TEST_SET_URL,
text=TEST_SET_RESPONSE,
)
await add_mock_config(hass)
registry = er.async_get(hass)
assert len(aioclient_mock.mock_calls) == 1
# Test Switch Entity
entity_id = "switch.ac_one_fresh_air"
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-freshair"
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 3
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["freshAirStatus"] == ADVANTAGE_AIR_STATE_ON
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 5
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["freshAirStatus"] == ADVANTAGE_AIR_STATE_OFF
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_udld
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
choices: ['enabled','disabled']
msg_time:
description:
- Message time in seconds for UDLD packets or keyword 'default'.
reset:
description:
- Ability to reset all ports shut down by UDLD. 'state' parameter
cannot be 'absent' when this is present.
type: bool
default: 'no'
state:
description:
- Manage the state of the resource. When set to 'absent',
aggressive and msg_time are set to their default values.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure udld aggressive mode is globally disabled and the global message interval is 20
- nxos_udld:
aggressive: disabled
msg_time: 20
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Ensure agg mode is globally enabled and msg time is 15
- nxos_udld:
aggressive: enabled
msg_time: 15
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {
'msg_time': '15',
}
def execute_show_command(command, module, command_type='cli_show'):
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
if network_api == 'cliconf':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif network_api == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_commands_config_udld_global(delta, reset, existing):
commands = []
for param, value in delta.items():
if param == 'aggressive':
command = 'udld aggressive' if value == 'enabled' else 'no udld aggressive'
commands.append(command)
elif param == 'msg_time':
if value == 'default':
if existing.get('msg_time') != PARAM_TO_DEFAULT_KEYMAP.get('msg_time'):
commands.append('no udld message-time')
else:
commands.append('udld message-time ' + value)
if reset:
command = 'udld reset'
commands.append(command)
return commands
def get_commands_remove_udld_global(existing):
commands = []
if existing.get('aggressive') == 'enabled':
command = 'no udld aggressive'
commands.append(command)
if existing.get('msg_time') != PARAM_TO_DEFAULT_KEYMAP.get('msg_time'):
command = 'no udld message-time'
commands.append(command)
return commands
def get_udld_global(module):
command = 'show udld global'
udld_table = execute_show_command(command, module)[0]
status = str(udld_table.get('udld-global-mode', None))
if status == 'enabled-aggressive':
aggressive = 'enabled'
else:
aggressive = 'disabled'
interval = str(udld_table.get('message-interval', None))
udld = dict(msg_time=interval, aggressive=aggressive)
return udld
def main():
argument_spec = dict(
aggressive=dict(required=False, choices=['enabled', 'disabled']),
msg_time=dict(required=False, type='str'),
reset=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
aggressive = module.params['aggressive']
msg_time = module.params['msg_time']
reset = module.params['reset']
state = module.params['state']
if reset and state == 'absent':
module.fail_json(msg="state must be present when using reset flag.")
args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_udld_global(module)
end_state = existing
delta = set(proposed.items()).difference(existing.items())
changed = False
commands = []
if state == 'present':
if delta:
command = get_commands_config_udld_global(dict(delta), reset, existing)
commands.append(command)
elif state == 'absent':
command = get_commands_remove_udld_global(existing)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_udld_global(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
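# Illustrative sketch (not part of the module; the state values are made up):
# how the proposed/existing diff computed in main() is turned into CLI commands
# by get_commands_config_udld_global().
def _udld_delta_example():
    existing = {'aggressive': 'disabled', 'msg_time': '15'}
    proposed = {'aggressive': 'enabled', 'msg_time': '40'}
    delta = dict(set(proposed.items()).difference(existing.items()))
    # Expected commands (order may vary): ['udld aggressive', 'udld message-time 40']
    return get_commands_config_udld_global(delta, reset=False, existing=existing)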
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors : David Castellanos
#
# Copyright (c) 2012, Telefonica Móviles España S.A.U.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from mobilemanager.mmdbus.service import method
from mobilemanager.devices.ModemGsmExceptions import IncorrectPassword
MM_URI = 'org.freedesktop.ModemManager.Modem'
MM_URI_DBG = 'org.freedesktop.ModemManager.Debug'
class Modem(object):
@method(MM_URI,
in_signature='', out_signature='b',
method_name='IsOperatorLocked')
def m_is_operator_locked(self):
def function(task):
cmd = 'AT^CARDLOCK?'
regex = r'\^CARDLOCK: (?P<status>.+),(?P<times>.+),(?P<operator>.+)'
r_values = ['status', 'times', 'operator']
res = self.io.com.send_query({"type" : "regex",
"cmd" : cmd,
"task" : task,
"regex" : regex,
"r_values" : r_values})
is_operator_locked = False
if (res is not None) and (res['status'] == '1'):
is_operator_locked = True
return is_operator_locked
task_msg = "[Huawei] Is Device Operator Locked?"
return self.io.task_pool.exec_task(function, task_msg=task_msg)
@method(MM_URI,
in_signature='s', out_signature='',
method_name='UnlockOperator')
def m_unlock_operator(self, unlock_code):
def function(task):
cmd = 'AT^CARDLOCK="%s"' % unlock_code
res = self.io.com.send_query({"type" : "simple",
"cmd" : cmd,
"task" : task })
if res is not True:
raise IncorrectPassword
task_msg = "[Huawei] Device Operator Unlock"
self.io.task_pool.exec_task(function, task_msg=task_msg)
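# Illustrative sketch (the reply string is made up, not from a real device):
# how the CARDLOCK regex above maps an AT response onto the r_values fields.
if __name__ == '__main__':
    import re
    sample = '^CARDLOCK: 1,10,0'
    m = re.search(r'\^CARDLOCK: (?P<status>.+),(?P<times>.+),(?P<operator>.+)', sample)
    if m:
        # A status of '1' is what m_is_operator_locked() treats as "locked".
        print(m.groupdict())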
"""
Pylot
"""
import os
import datetime
import inspect
from werkzeug.contrib.fixers import ProxyFix
from flask_classy import (FlaskView,
route)
from flask import (Flask,
abort,
redirect,
request,
render_template,
flash,
url_for,
jsonify,
session)
from flask_assets import Environment
from flask_kvsession import KVSessionExtension
from simplekv.memory.redisstore import RedisStore
import utils
# ------------------------------------------------------------------------------
import pkginfo
NAME = pkginfo.NAME
__version__ = pkginfo.VERSION
__author__ = pkginfo.AUTHOR
__license__ = pkginfo.LICENSE
__copyright__ = pkginfo.COPYRIGHT
# ------------------------------------------------------------------------------
class Pylot(FlaskView):
"""
Pylot a FlaskView extension
"""
LAYOUT = "layout.html" # The default layout
assets = None
_app = None
_bind = set()
_context = dict(
APP_NAME="",
APP_VERSION="",
YEAR=datetime.datetime.now().year,
GOOGLE_ANALYTICS_ID=None,
LOGIN_ENABLED=False,
LOGIN_OAUTH_ENABLED=False,
LOGIN_OAUTH_CLIENT_IDS=[],
LOGIN_OAUTH_BUTTONS=[],
META=dict(
title="",
description="",
url="",
image="",
site_name="",
object_type="",
locale="",
keywords=[],
use_opengraph=True,
use_googleplus=True,
use_twitter=True
)
)
@classmethod
def init(cls, flask_or_import_name, directory=None, config=None):
"""
Register all subclasses of Pylot against a single Flask application;
call it once when initializing the application.
:param flask_or_import_name: Flask instance or import name -> __name__
:param directory: The directory containing your project's Views, Templates and Static
:param config: string of config object. ie: "app.config.Dev"
"""
if isinstance(flask_or_import_name, Flask):
app = flask_or_import_name
else:
app = Flask(flask_or_import_name)
app.wsgi_app = ProxyFix(app.wsgi_app)
if config:
app.config.from_object(config)
if directory:
app.template_folder = directory + "/templates"
app.static_folder = directory + "/static"
cls._app = app
cls.assets = Environment(cls._app)
for _app in cls._bind:
_app(cls._app)
for subcls in cls.__subclasses__():
subcls.register(cls._app)
return cls._app
@classmethod
def bind_(cls, kls):
"""
To bind application that needs the 'app' object to init
:param kls: a callable that will receive the Flask app instance as its first argument
"""
if not hasattr(kls, "__call__"):
raise TypeError("From Pylot.bind_: '%s' is not callable" % kls)
cls._bind.add(kls)
return kls
@classmethod
def extends_(cls, kls):
"""
A view decorator to extend another view class or function to itself
It will inherit all of its methods and properties and use them on itself
-- EXAMPLES --
class Index(Pylot):
pass
index = Index()
::-> As decorator on classes ::
@index.extends_
class A(object):
def hello(self):
pass
@index.extends_
class C(object):
def world(self):
pass
::-> Decorator With function call ::
@index.extends_
def hello(self):
pass
"""
if inspect.isclass(kls):
for _name, _val in kls.__dict__.items():
if not _name.startswith("__"):
setattr(cls, _name, _val)
elif inspect.isfunction(kls):
setattr(cls, kls.__name__, kls)
return cls
@classmethod
def context_(cls, **kwargs):
"""
Assign a global view context to be used in the template
:params **kwargs:
"""
cls._context.update(kwargs)
@classmethod
def config_(cls, key, default=None):
"""
Shortcut to access the config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
"""
return cls._app.config.get(key, default)
@classmethod
def meta_(cls, **kwargs):
"""
Meta allows you to add meta data to site
:params **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
**Boolean By default these keys are True
use_opengraph
use_twitter
use_googleplus
"""
_name_ = "META"
meta_data = cls._context.get(_name_, {})
for k, v in kwargs.items():
# Prepend/Append string
if (k.endswith("__prepend") or k.endswith("__append")) \
and isinstance(v, str):
k, position = k.split("__", 2)
_v = meta_data.get(k, "")
if position == "prepend":
v += _v
elif position == "append":
v = _v + v
if k == "keywords" and not isinstance(k, list):
raise ValueError("Meta keyword must be a list")
meta_data[k] = v
cls.context_(**{_name_: meta_data})
@classmethod
def success_(cls, message):
"""
Set a flash success message
"""
flash(message, "success")
@classmethod
def error_(cls, message):
"""
Set a flash error message
"""
flash(message, "error")
@classmethod
def render(cls, data={}, view_template=None, layout=None, **kwargs):
"""
To render data to the associate template file of the action view
:param data: The context data to pass to the template
:param view_template: The file template to use. By default it will map the classname/action.html
:param layout: The body layout, must contain {% include __view_template__ %}
"""
if not view_template:
stack = inspect.stack()[1]
module = inspect.getmodule(cls).__name__
module_name = module.split(".")[-1]
action_name = stack[3] # The method being called in the class
view_name = cls.__name__ # The name of the class without View
if view_name.endswith("View"):
view_name = view_name[:-4]
view_template = "%s/%s.html" % (view_name, action_name)
data = data if data else dict()
data["__"] = cls._context if cls._context else {}
if kwargs:
data.update(kwargs)
data["__view_template__"] = view_template
return render_template(layout or cls.LAYOUT, **data)
class Mailer(object):
"""
A simple wrapper to switch between SES-Mailer and Flask-Mail based on config
"""
mail = None
provider = None
def init_app(self, app):
import ses_mailer
import flask_mail
self.app = app
self.provider = app.config.get("MAILER_BACKEND", "SES").upper()
if self.provider not in ["SES", "FLASK-MAIL"]:
raise AttributeError("Invalid Mail provider")
if self.provider == "SES":
self.mail = ses_mailer.Mail(app=app)
elif self.provider == "FLASK-MAIL":
self.mail = flask_mail.Mail(app)
def send(self, to, subject, body, reply_to=None, **kwargs):
"""
Send simple message
"""
if self.provider == "SES":
self.mail.send(to=to,
subject=subject,
body=body,
reply_to=reply_to,
**kwargs)
elif self.provider == "FLASK-MAIL":
msg = flask_mail.Message(recipients=to, subject=subject, body=body, reply_to=reply_to,
sender=self.app.config.get("MAIL_DEFAULT_SENDER"))
self.mail.send(msg)
def send_template(self, template, to, reply_to=None, **context):
"""
Send Template message
"""
if self.provider == "SES":
self.mail.send_template(template=template, to=to, reply_to=reply_to, **context)
elif self.provider == "FLASK-MAIL":
ses_mail = ses_mailer.Mail(app=self.app)
data = ses_mail.parse_template(template=template, **context)
msg = flask_mail.Message(recipients=to,
subject=data["subject"],
body=data["body"],
reply_to=reply_to,
sender=self.app.config.get("MAIL_DEFAULT_SENDER")
)
self.mail.send(msg)
class Storage(object):
store = None
def init_app(self, app):
import flask_store
type = app.config.get("STORAGE_BACKEND", "LOCAL")
if type == "S3":
provider = "flask_store.providers.s3.S3Provider"
elif type == "LOCAL":
provider = "flask_store.providers.local.LocalProvider"
else:
provider = app.config.get("STORAGE_BACKEND")
bucket = app.config.get("STORAGE_S3_BUCKET", "")
domain = app.config.get("STORAGE_DOMAIN", "https://s3.amazonaws.com/%s/" % bucket)
app.config.update({
"STORE_PROVIDER": provider,
"STORE_PATH": app.config.get("STORAGE_PATH"),
"STORE_URL_PREFIX": app.config.get("STORAGE_URL_PREFIX", "files"),
"STORE_DOMAIN": domain,
"STORE_S3_REGION": app.config.get("STORAGE_S3_REGION", "us-east-1"),
"STORE_S3_BUCKET": bucket,
"STORE_S3_ACCESS_KEY": app.config.get("AWS_ACCESS_KEY_ID"),
"STORE_S3_SECRET_KEY": app.config.get("AWS_SECRET_ACCESS_KEY")
})
self.store = flask_store.Store(app=app)
def get_url(self, file, absolute=False):
provider = self.store.Provider(file)
return provider.absolute_url if absolute else provider.relative_url
def get_path(self, file, absolute=False):
provider = self.store.Provider(file)
return provider.absolute_path if absolute else provider.relative_path
def get(self):
pass
def put(self, file):
provider = self.store.Provider(file)
provider.save()
return dict(filename=provider.filename,
relative_url=provider.relative_url,
absolute_url=provider.absolute_url,
absolute_path=provider.absolute_path)
def exists(self, file):
provider = self.store.Provider(file)
return provider.exists()
class Cache(object):
pass
class Session(object):
def __init__(self, app):
self.app = app
# SESSION
store = None
backend = self.app.config.get("SESSION_BACKEND")
if backend:
backend = backend.upper()
if backend == "REDIS":
uri = self.app.config.get("SESSION_BACKEND_URI")
_redis = utils.connect_redis(uri)
store = RedisStore(_redis)
if store:
KVSessionExtension(store, self.app)
class AppError(Exception):
""" For exception in application pages """
pass
# ------------------------------------------------------------------------------
# Setup facade
mailer = Mailer()
storage = Storage()
cache = Cache()
Pylot.bind_(Session)
Pylot.bind_(mailer.init_app)
Pylot.bind_(storage.init_app)
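# Illustrative usage sketch (the "Index" view and template layout are
# hypothetical, not shipped with this package): a Pylot view is a FlaskView
# subclass, and Pylot.init() registers every subclass plus the extensions
# bound above.
def _pylot_usage_example():
    class Index(Pylot):
        def index(self):
            # Renders Index/index.html inside LAYOUT by default.
            return self.render(message="Hello")

    # Creates the Flask app, applies bound extensions, and registers all views.
    app = Pylot.init(__name__, directory=".")
    return app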
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL ().
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'OpenOffice Report Designer',
'version': '0.1',
'category': 'Reporting',
'description': """
This module is used along with OpenERP OpenOffice Plugin.
=========================================================
This module adds wizards to Import/Export .sxw report that you can modify in OpenOffice.
Once you have modified it you can upload the report using the same wizard.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['wizard/base_report_design_view.xml' , 'base_report_designer_installer.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'images': ['images/base_report_designer1.jpeg','images/base_report_designer2.jpeg',],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
import os
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
import warnings
class BuilderEnvironment(object):
"""Set up environment
"""
required_options = set(['cmake', 'wget', 'pkg_config', 'tar', 'xz', 'svn'])
def __init__(self, cfg_file='bob.cfg', environ={}):
self._environ = environ
self._environ.update(self.read(cfg_file))
def warn_missing_options(self, opts):
missing = self.required_options - set(opts)
if len(missing) > 0:
warnings.warn('%s: missing required option(s)' %
', '.join(missing))
def warn_unknown_options(self, opts):
unknowns = set(opts) - self.required_options
if len(unknowns) > 0:
warnings.warn('%s: unrecognized option(s)' % ', '.join(unknowns))
def read(self, cfg_file):
parser = SafeConfigParser()
parser.optionxform = str
with open(cfg_file, 'r') as cfg:
parser.readfp(cfg)
prog_paths = {}
try:
paths = parser.items('bob')
except NoSectionError:
warnings.warn('%s: not a bob cfg file.' % cfg_file)
else:
self.warn_missing_options(parser.options('bob'))
for prog in parser.options('bob'):
try:
prog_paths[prog] = parser.get('bob', prog)
except (NoSectionError, NoOptionError):
prog_paths[prog] = prog
return prog_paths
def tobash(self):
lines = []
for item in self._environ.items():
lines.append('export %s="%s"' % item)
lines.append('')
return os.linesep.join(lines)
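# Illustrative sketch (the cfg contents and filename below are made up):
# BuilderEnvironment reads the [bob] section of a cfg file and can emit the
# collected paths as shell exports via tobash().
if __name__ == '__main__':
    sample = (
        "[bob]\n"
        "cmake = /usr/bin/cmake\n"
        "wget = /usr/bin/wget\n"
        "pkg_config = /usr/bin/pkg-config\n"
        "tar = /bin/tar\n"
        "xz = /usr/bin/xz\n"
        "svn = /usr/bin/svn\n"
    )
    with open('example_bob.cfg', 'w') as fh:
        fh.write(sample)
    env = BuilderEnvironment('example_bob.cfg')
    # Prints one "export name=path" line per option in the [bob] section.
    print(env.tobash())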
# (c) 2012, Michael DeHaan
# (c) 2013, Steven Dossett
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.lookup import LookupBase
from ansible.inventory import Inventory
class LookupModule(LookupBase):
def get_hosts(self, variables, pattern):
hosts = []
if pattern[0] in ('!','&'):
obj = pattern[1:]
else:
obj = pattern
if obj in variables['groups']:
hosts = variables['groups'][obj]
elif obj in variables['groups']['all']:
hosts = [obj]
return hosts
def run(self, terms, variables=None, **kwargs):
host_list = []
for term in terms:
patterns = Inventory.order_patterns(Inventory.split_host_pattern(term))
for p in patterns:
that = self.get_hosts(variables, p)
if p.startswith("!"):
host_list = [ h for h in host_list if h not in that]
elif p.startswith("&"):
host_list = [ h for h in host_list if h in that ]
else:
host_list.extend(that)
# return unique list
return list(set(host_list))
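# Illustrative sketch (hypothetical inventory data): how get_hosts() resolves a
# single pattern against the 'groups' variable. Exclusion ('!') and
# intersection ('&') are applied afterwards in run().
def _lookup_example():
    fake_vars = {'groups': {'all': ['web1', 'web2', 'db1'], 'web': ['web1', 'web2']}}
    lm = LookupModule()
    assert lm.get_hosts(fake_vars, 'web') == ['web1', 'web2']
    # The leading '!' is stripped here; run() handles the actual exclusion.
    assert lm.get_hosts(fake_vars, '!db1') == ['db1']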
"""
"""
from PIL import Image
import HTMLParser
import string
import re
import os.path
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage
from django.template.loader import get_template
from django.template import TemplateDoesNotExist, RequestContext
from django.forms import ValidationError
from django import forms
from django.http import HttpResponse
from django.forms.fields import EMPTY_VALUES
from classifieds.conf import settings
from classifieds.search import SelectForm, searchForms
from classifieds.models import Ad, Field, Category, Pricing, PricingOptions
def category_template_name(category, page):
return os.path.join(u'classifieds/category',
category.template_prefix, page)
def render_category_page(request, category, page, context):
template_name = category_template_name(category, page)
try:
template = get_template(template_name)
except TemplateDoesNotExist:
template = get_template('classifieds/category/base/%s' % page)
context = RequestContext(request, context)
return HttpResponse(template.render(context))
def clean_adimageformset(self):
max_size = self.instance.category.images_max_size
for form in self.forms:
try:
if not hasattr(form.cleaned_data['full_photo'], 'file'):
continue
except (KeyError, AttributeError):
continue
if form.cleaned_data['full_photo'].size > max_size:
raise forms.ValidationError(_(u'Maximum image size is %s KB') % \
str(max_size / 1024))
im = Image.open(form.cleaned_data['full_photo'].file)
allowed_formats = self.instance.category.images_allowed_formats
if allowed_formats.filter(format=im.format).count() == 0:
raise forms.ValidationError(
_(u'Your image must be in one of the following formats: ')\
+ ', '.join(allowed_formats.values_list('format',
flat=True)))
def context_sortable(request, ads, perpage=settings.ADS_PER_PAGE):
order = '-'
sort = 'expires_on'
page = 1
if 'perpage' in request.GET and request.GET['perpage'] != '':
perpage = int(request.GET['perpage'])
if 'order' in request.GET and request.GET['order'] != '':
if request.GET['order'] == 'desc':
order = '-'
elif request.GET['order'] == 'asc':
order = ''
if 'page' in request.GET:
page = int(request.GET['page'])
if 'sort' in request.GET and request.GET['sort'] != '':
sort = request.GET['sort']
if sort in ['created_on', 'expires_on', 'category', 'title']:
ads_sorted = ads.extra(select={'featured': """SELECT 1
FROM `classifieds_payment_options`
LEFT JOIN `classifieds_payment` ON `classifieds_payment_options`.`payment_id` = `classifieds_payment`.`id`
LEFT JOIN `classifieds_pricing` ON `classifieds_pricing`.`id` = `classifieds_payment`.`pricing_id`
LEFT JOIN `classifieds_pricingoptions` ON `classifieds_payment_options`.`pricingoptions_id` = `classifieds_pricingoptions`.`id`
WHERE `classifieds_pricingoptions`.`name` = %s
AND `classifieds_payment`.`ad_id` = `classifieds_ad`.`id`
AND `classifieds_payment`.`paid` =1
AND `classifieds_payment`.`paid_on` < NOW()
AND DATE_ADD( `classifieds_payment`.`paid_on` , INTERVAL `classifieds_pricing`.`length`
DAY ) > NOW()"""}, select_params=[PricingOptions.FEATURED_LISTING]).extra(order_by=['-featured', order + sort])
else:
ads_sorted = ads.extra(select=SortedDict([('fvorder', 'select value from classifieds_fieldvalue LEFT JOIN classifieds_field on classifieds_fieldvalue.field_id = classifieds_field.id where classifieds_field.name = %s and classifieds_fieldvalue.ad_id = classifieds_ad.id'), ('featured', """SELECT 1
FROM `classifieds_payment_options`
LEFT JOIN `classifieds_payment` ON `classifieds_payment_options`.`payment_id` = `classifieds_payment`.`id`
LEFT JOIN `classifieds_pricing` ON `classifieds_pricing`.`id` = `classifieds_payment`.`pricing_id`
LEFT JOIN `classifieds_pricingoptions` ON `classifieds_payment_options`.`pricingoptions_id` = `classifieds_pricingoptions`.`id`
WHERE `classifieds_pricingoptions`.`name` = %s
AND `classifieds_payment`.`ad_id` = `classifieds_ad`.`id`
AND `classifieds_payment`.`paid` =1
AND `classifieds_payment`.`paid_on` < NOW()
AND DATE_ADD( `classifieds_payment`.`paid_on` , INTERVAL `classifieds_pricing`.`length`
DAY ) > NOW()""")]), select_params=[sort, PricingOptions.FEATURED_LISTING]).extra(order_by=['-featured', order + 'fvorder'])
pager = Paginator(ads_sorted, perpage)
try:
page = pager.page(page)
except InvalidPage:
page = {'object_list': False}
can_sortby_list = []
sortby_list = ['created_on']
for category in Category.objects.filter(ad__in=ads.values('pk').query).distinct():
can_sortby_list += category.sortby_fields.split(',')
for category in Category.objects.filter(ad__in=ads.values('pk').query).distinct():
for fieldname, in category.field_set.values_list('name'):
if fieldname not in sortby_list and fieldname in can_sortby_list:
sortby_list.append(fieldname)
for fieldname, in Field.objects.filter(category=None).values_list('name'):
if fieldname not in sortby_list and fieldname in can_sortby_list:
sortby_list.append(fieldname)
return {'page': page, 'sortfields': sortby_list, 'no_results': False,
'perpage': perpage}
def prepare_sforms(fields, fields_left, post=None):
sforms = []
select_fields = {}
for field in fields:
if field.field_type == Field.SELECT_FIELD: # is select field
# add select field
options = field.options.split(',')
choices = zip(options, options)
choices.insert(0, ('', 'Any',))
form_field = forms.ChoiceField(label=field.label, required=False, help_text=field.help_text + u'\nHold ctrl or command on Mac for multiple selections.', choices=choices, widget=forms.SelectMultiple)
# remove this field from fields_list
fields_left.remove(field.name)
select_fields[field.name] = form_field
sforms.append(SelectForm.create(select_fields, post))
for sf in searchForms:
f = sf.create(fields, fields_left, post)
if f is not None:
sforms.append(f)
return sforms
class StrippingParser(HTMLParser.HTMLParser):
# These are the HTML tags that we will leave intact
valid_tags = ('b', 'i', 'br', 'p', 'strong', 'h1', 'h2', 'h3', 'em',
'span', 'ul', 'ol', 'li')
from htmlentitydefs import entitydefs # replace entitydefs from sgmllib
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.result = ""
self.endTagList = []
def handle_data(self, data):
if data:
self.result = self.result + data
def handle_charref(self, name):
self.result = "%s%s;" % (self.result, name)
def handle_entityref(self, name):
if name in self.entitydefs:
x = ';'
else:
# this breaks unstandard entities that end with ';'
x = ''
self.result = "%s&%s%s" % (self.result, name, x)
def handle_starttag(self, tag, attrs):
""" Delete all tags except for legal ones """
if tag in self.valid_tags:
self.result = self.result + '<' + tag
for k, v in attrs:
if string.lower(k[0:2]) != 'on' and \
string.lower(v[0:10]) != 'javascript':
self.result = '%s %s="%s"' % (self.result, k, v)
endTag = '</%s>' % tag
self.endTagList.insert(0, endTag)
self.result = self.result + '>'
def handle_endtag(self, tag):
if tag in self.valid_tags:
self.result = "%s%s>" % (self.result, tag)
remTag = '%s>' % tag
self.endTagList.remove(remTag)
def cleanup(self):
""" Append missing closing tags """
for j in range(len(self.endTagList)):
self.result = self.result + self.endTagList[j]
def strip(s):
""" Strip illegal HTML tags from string s """
parser = StrippingParser()
parser.feed(s)
parser.close()
parser.cleanup()
return parser.result
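# Illustrative sketch (the markup is made up): strip() keeps whitelisted tags,
# drops on*/javascript attributes, and discards any tag not in valid_tags while
# keeping its text content.
def _strip_example():
    dirty = '<p onclick="evil()">Hello <script>alert(1)</script><b>world</b></p>'
    # -> '<p>Hello alert(1)<b>world</b></p>'
    return strip(dirty)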
class TinyMCEWidget(forms.Textarea):
def __init__(self, *args, **kwargs):
attrs = kwargs.setdefault('attrs', {})
if 'class' not in attrs:
attrs['class'] = 'tinymce'
else:
attrs['class'] += ' tinymce'
super(TinyMCEWidget, self).__init__(*args, **kwargs)
class Media:
js = ('js/tiny_mce/tiny_mce.js', 'js/tinymce_forms.js',)
class TinyMCEField(forms.CharField):
def clean(self, value):
"""Validates max_length and min_length. Returns a Unicode object."""
if value in EMPTY_VALUES:
return u''
stripped_value = re.sub(r'<.*?>', '', value)
stripped_value = string.replace(stripped_value, '&nbsp;', ' ')
stripped_value = string.replace(stripped_value, '&lt;', '<')
stripped_value = string.replace(stripped_value, '&gt;', '>')
stripped_value = string.replace(stripped_value, '&amp;', '&')
stripped_value = string.replace(stripped_value, '\n', '')
stripped_value = string.replace(stripped_value, '\r', '')
value_length = len(stripped_value)
value_length -= 1
if self.max_length is not None and value_length > self.max_length:
raise forms.ValidationError(self.error_messages['max_length'] % {'max': self.max_length, 'length': value_length})
if self.min_length is not None and value_length < self.min_length:
raise forms.ValidationError(self.error_messages['min_length'] % {'min': self.min_length, 'length': value_length})
return value
def field_list(instance):
class MockField:
def __init__(self, name, field_type, label, required, help_text, enable_wysiwyg, max_length):
self.name = name
self.field_type = field_type
self.label = label
self.required = required
self.help_text = help_text
self.enable_wysiwyg = enable_wysiwyg
self.max_length = max_length
title_field = MockField('title', Field.CHAR_FIELD, _('Title'), True, '', False, 100)
fields = [title_field] # all ads have titles
fields += list(instance.category.field_set.all())
fields += list(Field.objects.filter(category=None))
return fields
def fields_for_ad(instance):
# generate a sorted dict of fields corresponding to the Field model
# for the Ad instance
fields_dict = SortedDict()
fields = field_list(instance)
# this really, really should be refactored
for field in fields:
if field.field_type == Field.BOOLEAN_FIELD:
fields_dict[field.name] = forms.BooleanField(label=field.label, required=False, help_text=field.help_text)
elif field.field_type == Field.CHAR_FIELD:
widget = forms.TextInput
fields_dict[field.name] = forms.CharField(label=field.label, required=field.required, max_length=field.max_length, help_text=field.help_text, widget=widget)
elif field.field_type == Field.DATE_FIELD:
fields_dict[field.name] = forms.DateField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.DATETIME_FIELD:
fields_dict[field.name] = forms.DateTimeField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.EMAIL_FIELD:
fields_dict[field.name] = forms.EmailField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.FLOAT_FIELD:
fields_dict[field.name] = forms.FloatField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.INTEGER_FIELD:
fields_dict[field.name] = forms.IntegerField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.TIME_FIELD:
fields_dict[field.name] = forms.TimeField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.URL_FIELD:
fields_dict[field.name] = forms.URLField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.SELECT_FIELD:
options = field.options.split(',')
fields_dict[field.name] = forms.ChoiceField(label=field.label, required=field.required, help_text=field.help_text, choices=zip(options, options))
elif field.field_type == Field.TEXT_FIELD:
if field.enable_wysiwyg:
widget = TinyMCEWidget
field_type = TinyMCEField
else:
widget = forms.Textarea
field_type = forms.CharField
fields_dict[field.name] = field_type(label=field.label,
required=field.required,
help_text=field.help_text,
max_length=field.max_length,
widget=widget)
else:
raise NotImplementedError(u'Unknown field type "%s"' % field.get_field_type_display())
return fields_dict
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Schema(object):
"""
Represents a DynamoDB schema.
:ivar hash_key_name: The name of the hash key of the schema.
:ivar hash_key_type: The DynamoDB type specification for the
hash key of the schema.
:ivar range_key_name: The name of the range key of the schema
or None if no range key is defined.
:ivar range_key_type: The DynamoDB type specification for the
range key of the schema or None if no range key is defined.
:ivar dict: The underlying Python dictionary that needs to be
passed to Layer1 methods.
"""
def __init__(self, schema_dict):
self._dict = schema_dict
def __repr__(self):
if self.range_key_name:
s = 'Schema(%s:%s)' % (self.hash_key_name, self.range_key_name)
else:
s = 'Schema(%s)' % self.hash_key_name
return s
@classmethod
def create(cls, hash_key, range_key=None):
"""Convenience method to create a schema object.
Example usage::
schema = Schema.create(hash_key=('foo', 'N'))
schema2 = Schema.create(hash_key=('foo', 'N'),
range_key=('bar', 'S'))
:type hash_key: tuple
:param hash_key: A tuple of (hash_key_name, hash_key_type)
:type range_key: tuple
:param range_key: A tuple of (range_key_name, range_key_type)
"""
reconstructed = {
'HashKeyElement': {
'AttributeName': hash_key[0],
'AttributeType': hash_key[1],
}
}
if range_key is not None:
reconstructed['RangeKeyElement'] = {
'AttributeName': range_key[0],
'AttributeType': range_key[1],
}
instance = cls(None)
instance._dict = reconstructed
return instance
@property
def dict(self):
return self._dict
@property
def hash_key_name(self):
return self._dict['HashKeyElement']['AttributeName']
@property
def hash_key_type(self):
return self._dict['HashKeyElement']['AttributeType']
@property
def range_key_name(self):
name = None
if 'RangeKeyElement' in self._dict:
name = self._dict['RangeKeyElement']['AttributeName']
return name
@property
def range_key_type(self):
type = None
if 'RangeKeyElement' in self._dict:
type = self._dict['RangeKeyElement']['AttributeType']
return type
def __eq__(self, other):
return (self.hash_key_name == other.hash_key_name and
self.hash_key_type == other.hash_key_type and
self.range_key_name == other.range_key_name and
self.range_key_type == other.range_key_type)
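# Illustrative sketch (the attribute names are made up): the Layer1-style dict
# that Schema.create() builds and exposes via the .dict property.
if __name__ == '__main__':
    schema = Schema.create(hash_key=('forum', 'S'), range_key=('subject', 'S'))
    # {'HashKeyElement': {'AttributeName': 'forum', 'AttributeType': 'S'},
    #  'RangeKeyElement': {'AttributeName': 'subject', 'AttributeType': 'S'}}
    print(schema.dict)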
import socket
import time
import machine
led1 = machine.Pin(5, machine.Pin.OUT)
led2 = machine.Pin(4, machine.Pin.OUT)
adc = machine.ADC(0)
s = socket.socket()
host = "To Do: Enter ip-address of remote server"
port = 12344
counter = 0
while True:
try:
while True:
s = socket.socket()
s.connect((host, port))
time.sleep(1)
s.send("ready")
output = s.recv(2048)
print(output)
if "disconnect" in output:
s.close()
if counter == 1:
if "hi sis" in output:
p13 = machine.Pin(13)
pwm13 = machine.PWM(p13)
servo_s = machine.PWM(machine.Pin(13), freq=50)
p14 = machine.Pin(14)
pwm14 = machine.PWM(p14)
servo_b = machine.PWM(machine.Pin(14), freq=50)
p12 = machine.Pin(12)
pwm12 = machine.PWM(p12)
servo_a = machine.PWM(machine.Pin(12), freq=50)
servo_s.duty(30)
servo_b.duty(60)
servo_a.duty(100)
time.sleep(3)
servo_s.duty(50)
servo_a.duty(60)
servo_b.duty(50)
time.sleep(2)
servo_s.duty(70)
servo_a.duty(80)
servo_b.duty(30)
time.sleep(1)
counter = 0
elif "High five" in output:
p13 = machine.Pin(13)
pwm13 = machine.PWM(p13)
servo_s = machine.PWM(machine.Pin(13), freq=50)
p14 = machine.Pin(14)
pwm14 = machine.PWM(p14)
servo_b = machine.PWM(machine.Pin(14), freq=50)
p12 = machine.Pin(12)
pwm12 = machine.PWM(p12)
servo_a = machine.PWM(machine.Pin(12), freq=50)
servo_s.duty(30)
servo_b.duty(60)
servo_a.duty(100)
time.sleep(3)
servo_s.duty(50)
servo_a.duty(80)
servo_b.duty(60)
if adc.read() > 200:
for i in range(3):
led1.high()
time.sleep(0.1)
led1.low()
time.sleep(0.1)
led2.high()
time.sleep(0.1)
led2.low()
time.sleep(0.3)
else:
led1.high()
time.sleep(2)
servo_s.duty(70)
servo_a.duty(80)
servo_b.duty(30)
time.sleep(1)
counter = 0
else:
counter = 1
except:
pass
"""An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
# Modified by Giampaolo Rodola' to add TLS support.
#
import os
import sys
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["FTP","Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1 # Process data out of band
# The standard FTP server control port
FTP_PORT = 21
# Exception raised when an error or invalid response is received
class Error(Exception): pass
class error_reply(Error): pass # unexpected [123]xx reply
class error_temp(Error): pass # 4xx errors
class error_perm(Error): pass # 5xx errors
class error_proto(Error): pass # response does not begin with [1-5]
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
B_CRLF = b'\r\n'
# The class itself
class FTP:
'''An FTP client class.
To create a connection, call the class using these arguments:
host, user, passwd, acct, timeout
The first four arguments are all strings, and have default value ''.
timeout must be numeric and defaults to None if not passed,
meaning that no timeout will be set on any ftp socket(s)
If a timeout is passed, then this is now the default timeout for all ftp
socket operations for this instance.
Then use self.connect() with optional host and port argument.
To download a file, use ftp.retrlines('RETR ' + filename),
or ftp.retrbinary() with slightly different arguments.
To upload a file, use ftp.storlines() or ftp.storbinary(),
which have an open file as argument (see their definitions
below for details).
The download/upload functions first issue appropriate TYPE
and PORT or PASV commands.
'''
debugging = 0
host = ''
port = FTP_PORT
sock = None
file = None
welcome = None
passiveserver = 1
encoding = "latin1"
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
# Optional arguments are host (for connect()),
# and user, passwd, acct (for login())
def __init__(self, host='', user='', passwd='', acct='',
timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.timeout = timeout
if host:
self.connect(host)
if user:
self.login(user, passwd, acct)
def __enter__(self):
return self
# Context management protocol: try to quit() if active
def __exit__(self, *args):
if self.sock is not None:
try:
self.quit()
except (socket.error, EOFError):
pass
finally:
if self.sock is not None:
self.close()
def connect(self, host='', port=0, timeout=-999):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
'''
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
self.sock = socket.create_connection((self.host, self.port), self.timeout)
self.af = self.sock.family
self.file = self.sock.makefile('r', encoding=self.encoding)
self.welcome = self.getresp()
return self.welcome
def getwelcome(self):
'''Get the welcome message from the server.
(this is read and squirreled away by connect())'''
if self.debugging:
print('*welcome*', self.sanitize(self.welcome))
return self.welcome
def set_debuglevel(self, level):
'''Set the debugging level.
The required argument level means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF'''
self.debugging = level
debug = set_debuglevel
def set_pasv(self, val):
'''Use passive or active mode for data transfers.
With a false argument, use the normal PORT mode,
With a true argument, use the PASV command.'''
self.passiveserver = val
# Internal: "sanitize" a string for printing
def sanitize(self, s):
if s[:5] == 'pass ' or s[:5] == 'PASS ':
i = len(s)
while i > 5 and s[i-1] in {'\r', '\n'}:
i = i-1
s = s[:5] + '*'*(i-5) + s[i:]
return repr(s)
# Internal: send one line to the server, appending CRLF
def putline(self, line):
line = line + CRLF
if self.debugging > 1: print('*put*', self.sanitize(line))
self.sock.sendall(line.encode(self.encoding))
# Internal: send one command to the server (through putline())
def putcmd(self, line):
if self.debugging: print('*cmd*', self.sanitize(line))
self.putline(line)
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
line = self.file.readline()
if self.debugging > 1:
print('*get*', self.sanitize(line))
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server, which may possibly
# consist of multiple lines. Return a single string with no
# trailing CRLF. If the response consists of multiple lines,
# these are separated by '\n' characters in the string
def getmultiline(self):
line = self.getline()
if line[3:4] == '-':
code = line[:3]
while 1:
nextline = self.getline()
line = line + ('\n' + nextline)
if nextline[:3] == code and \
nextline[3:4] != '-':
break
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
resp = self.getmultiline()
if self.debugging: print('*resp*', self.sanitize(resp))
self.lastresp = resp[:3]
c = resp[:1]
if c in {'1', '2', '3'}:
return resp
if c == '4':
raise error_temp(resp)
if c == '5':
raise error_perm(resp)
raise error_proto(resp)
def voidresp(self):
"""Expect a response beginning with '2'."""
resp = self.getresp()
if resp[:1] != '2':
raise error_reply(resp)
return resp
def abort(self):
'''Abort a file transfer. Uses out-of-band data.
This does not follow the procedure from the RFC to send Telnet
IP and Synch; that doesn't seem to work with the servers I've
tried. Instead, just send the ABOR command as OOB data.'''
line = b'ABOR' + B_CRLF
if self.debugging > 1: print('*put urgent*', self.sanitize(line))
self.sock.sendall(line, MSG_OOB)
resp = self.getmultiline()
if resp[:3] not in {'426', '225', '226'}:
raise error_proto(resp)
return resp
def sendcmd(self, cmd):
'''Send a command and return the response.'''
self.putcmd(cmd)
return self.getresp()
def voidcmd(self, cmd):
"""Send a command and expect a response beginning with '2'."""
self.putcmd(cmd)
return self.voidresp()
def sendport(self, host, port):
'''Send a PORT command with the current host and the given
port number.
'''
hbytes = host.split('.')
pbytes = [repr(port//256), repr(port%256)]
bytes = hbytes + pbytes
cmd = 'PORT ' + ','.join(bytes)
return self.voidcmd(cmd)
def sendeprt(self, host, port):
'''Send a EPRT command with the current host and the given port number.'''
af = 0
if self.af == socket.AF_INET:
af = 1
if self.af == socket.AF_INET6:
af = 2
if af == 0:
raise error_proto('unsupported address family')
fields = ['', repr(af), host, repr(port), '']
cmd = 'EPRT ' + '|'.join(fields)
return self.voidcmd(cmd)
def makeport(self):
'''Create a new socket and send a PORT command for it.'''
msg = "getaddrinfo returns an empty list"
sock = None
for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.bind(sa)
except socket.error as msg:
if sock:
sock.close()
sock = None
continue
break
if not sock:
raise socket.error(msg)
sock.listen(1)
port = sock.getsockname()[1] # Get proper port
host = self.sock.getsockname()[0] # Get proper host
if self.af == socket.AF_INET:
resp = self.sendport(host, port)
else:
resp = self.sendeprt(host, port)
if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(self.timeout)
return sock
def makepasv(self):
if self.af == socket.AF_INET:
host, port = parse227(self.sendcmd('PASV'))
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
def ntransfercmd(self, cmd, rest=None):
"""Initiate a transfer over the data connection.
If the transfer is active, send a port command and the
transfer command, and accept the connection. If the server is
passive, send a pasv command, connect to it, and start the
transfer command. Either way, return the socket for the
connection and the expected size of the transfer. The
expected size may be None if it could not be determined.
Optional `rest' argument can be a string that is sent as the
argument to a REST command. This is essentially a server
marker used to tell the server to skip over any data up to the
given marker.
"""
size = None
if self.passiveserver:
host, port = self.makepasv()
conn = socket.create_connection((host, port), self.timeout)
try:
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# Some servers apparently send a 200 reply to
# a LIST or STOR command, before the 150 reply
# (and way before the 226 reply). This seems to
# be in violation of the protocol (which only allows
# 1xx or error messages for LIST), so we just discard
# this response.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply(resp)
except:
conn.close()
raise
else:
sock = self.makeport()
try:
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# See above.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply(resp)
conn, sockaddr = sock.accept()
if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
conn.settimeout(self.timeout)
finally:
sock.close()
if resp[:3] == '150':
# this is conditional in case we received a 125
size = parse150(resp)
return conn, size
def transfercmd(self, cmd, rest=None):
"""Like ntransfercmd() but returns only the socket."""
return self.ntransfercmd(cmd, rest)[0]
def login(self, user = '', passwd = '', acct = ''):
'''Login, default anonymous.'''
if not user: user = 'anonymous'
if not passwd: passwd = ''
if not acct: acct = ''
if user == 'anonymous' and passwd in {'', '-'}:
# If there is no anonymous ftp password specified
# then we'll just use anonymous@
# We don't send any other thing because:
# - We want to remain anonymous
# - We want to stop SPAM
# - We don't want to let ftp sites to discriminate by the user,
# host or country.
passwd = passwd + 'anonymous@'
resp = self.sendcmd('USER ' + user)
if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
if resp[0] != '2':
raise error_reply(resp)
return resp
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
"""Retrieve data in binary mode. A new port is created for you.
Args:
cmd: A RETR command.
callback: A single parameter callable to be called on each
block of data read.
blocksize: The maximum number of bytes to read from the
socket at one time. [default: 8192]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
with self.transfercmd(cmd, rest) as conn:
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
return self.voidresp()
def retrlines(self, cmd, callback = None):
"""Retrieve data in line mode. A new port is created for you.
Args:
cmd: A RETR, LIST, NLST, or MLSD command.
callback: An optional single parameter callable that is called
for each line with the trailing CRLF stripped.
[default: print_line()]
Returns:
The response code.
"""
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
with self.transfercmd(cmd) as conn, \
conn.makefile('r', encoding=self.encoding) as fp:
while 1:
line = fp.readline()
if self.debugging > 2: print('*retr*', repr(line))
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
"""Store a file in binary mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a read(num_bytes) method.
blocksize: The maximum data size to read from fp and send over
the connection at once. [default: 8192]
callback: An optional single parameter callable that is called on
each block of data after it is sent. [default: None]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
with self.transfercmd(cmd, rest) as conn:
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
"""Store a file in line mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a readline() method.
callback: An optional single parameter callable that is called on
each line after it is sent. [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE A')
with self.transfercmd(cmd) as conn:
while 1:
buf = fp.readline()
if not buf: break
if buf[-2:] != B_CRLF:
if buf[-1] in B_CRLF: buf = buf[:-1]
buf = buf + B_CRLF
conn.sendall(buf)
if callback: callback(buf)
return self.voidresp()
def acct(self, password):
'''Send new account name.'''
cmd = 'ACCT ' + password
return self.voidcmd(cmd)
def nlst(self, *args):
'''Return a list of files in a given directory (default the current).'''
cmd = 'NLST'
for arg in args:
cmd = cmd + (' ' + arg)
files = []
self.retrlines(cmd, files.append)
return files
def dir(self, *args):
'''List a directory in long form.
By default list current directory to stdout.
Optional last argument is callback function; all
non-empty arguments before it are concatenated to the
LIST command. (This *should* only be used for a pathname.)'''
cmd = 'LIST'
func = None
if args[-1:] and type(args[-1]) != type(''):
args, func = args[:-1], args[-1]
for arg in args:
if arg:
cmd = cmd + (' ' + arg)
self.retrlines(cmd, func)
def rename(self, fromname, toname):
'''Rename a file.'''
resp = self.sendcmd('RNFR ' + fromname)
if resp[0] != '3':
raise error_reply(resp)
return self.voidcmd('RNTO ' + toname)
def delete(self, filename):
'''Delete a file.'''
resp = self.sendcmd('DELE ' + filename)
if resp[:3] in {'250', '200'}:
return resp
else:
raise error_reply(resp)
def cwd(self, dirname):
'''Change to a directory.'''
if dirname == '..':
try:
return self.voidcmd('CDUP')
except error_perm as msg:
if msg.args[0][:3] != '500':
raise
elif dirname == '':
dirname = '.' # does nothing, but could return error
cmd = 'CWD ' + dirname
return self.voidcmd(cmd)
def size(self, filename):
'''Retrieve the size of a file.'''
# The SIZE command is defined in RFC-3659
resp = self.sendcmd('SIZE ' + filename)
if resp[:3] == '213':
s = resp[3:].strip()
return int(s)
def mkd(self, dirname):
'''Make a directory, return its full pathname.'''
resp = self.voidcmd('MKD ' + dirname)
# fix around non-compliant implementations such as IIS shipped
# with Windows server 2003
if not resp.startswith('257'):
return ''
return parse257(resp)
def rmd(self, dirname):
'''Remove a directory.'''
return self.voidcmd('RMD ' + dirname)
def pwd(self):
'''Return current working directory.'''
resp = self.voidcmd('PWD')
# fix around non-compliant implementations such as IIS shipped
# with Windows server 2003
if not resp.startswith('257'):
return ''
return parse257(resp)
def quit(self):
'''Quit, and close the connection.'''
resp = self.voidcmd('QUIT')
self.close()
return resp
def close(self):
'''Close the connection without assuming anything about it.'''
if self.file:
self.file.close()
self.sock.close()
self.file = self.sock = None
try:
import ssl
except ImportError:
pass
else:
class FTP_TLS(FTP):
'''A FTP subclass which adds TLS support to FTP as described
in RFC-4217.
Connect as usual to port 21 implicitly securing the FTP control
connection before authenticating.
Securing the data connection requires user to explicitly ask
for it by calling prot_p() method.
Usage example:
>>> from ftplib import FTP_TLS
>>> ftps = FTP_TLS('ftp.python.org')
>>> ftps.login() # login anonymously previously securing control channel
'230 Guest login ok, access restrictions apply.'
>>> ftps.prot_p() # switch to secure data connection
'200 Protection level set to P'
>>> ftps.retrlines('LIST') # list directory content securely
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftps.quit()
'221 Goodbye.'
>>>
'''
ssl_version = ssl.PROTOCOL_TLSv1
def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
certfile=None, context=None,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
self.keyfile = keyfile
self.certfile = certfile
self.context = context
self._prot_p = False
FTP.__init__(self, host, user, passwd, acct, timeout)
def login(self, user='', passwd='', acct='', secure=True):
if secure and not isinstance(self.sock, ssl.SSLSocket):
self.auth()
return FTP.login(self, user, passwd, acct)
def auth(self):
'''Set up secure control connection by using TLS/SSL.'''
if isinstance(self.sock, ssl.SSLSocket):
raise ValueError("Already using TLS")
if self.ssl_version == ssl.PROTOCOL_TLSv1:
resp = self.voidcmd('AUTH TLS')
else:
resp = self.voidcmd('AUTH SSL')
if self.context is not None:
self.sock = self.context.wrap_socket(self.sock)
else:
self.sock = ssl.wrap_socket(self.sock, self.keyfile,
self.certfile,
ssl_version=self.ssl_version)
self.file = self.sock.makefile(mode='r', encoding=self.encoding)
return resp
def prot_p(self):
'''Set up secure data connection.'''
# PROT defines whether or not the data channel is to be protected.
# Though RFC-2228 defines four possible protection levels,
# RFC-4217 only recommends two, Clear and Private.
# Clear (PROT C) means that no security is to be used on the
# data-channel, Private (PROT P) means that the data-channel
# should be protected by TLS.
# PBSZ command MUST still be issued, but must have a parameter of
# '0' to indicate that no buffering is taking place and the data
# connection should not be encapsulated.
self.voidcmd('PBSZ 0')
resp = self.voidcmd('PROT P')
self._prot_p = True
return resp
def prot_c(self):
'''Set up clear text data connection.'''
resp = self.voidcmd('PROT C')
self._prot_p = False
return resp
# --- Overridden FTP methods
def ntransfercmd(self, cmd, rest=None):
conn, size = FTP.ntransfercmd(self, cmd, rest)
if self._prot_p:
if self.context is not None:
conn = self.context.wrap_socket(conn)
else:
conn = ssl.wrap_socket(conn, self.keyfile, self.certfile,
ssl_version=self.ssl_version)
return conn, size
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('r', encoding=self.encoding)
try:
while 1:
line = fp.readline()
if self.debugging > 2: print('*retr*', repr(line))
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
try:
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
try:
while 1:
buf = fp.readline()
if not buf: break
if buf[-2:] != B_CRLF:
if buf[-1] in B_CRLF: buf = buf[:-1]
buf = buf + B_CRLF
conn.sendall(buf)
if callback: callback(buf)
# shutdown ssl layer
if isinstance(conn, ssl.SSLSocket):
conn.unwrap()
finally:
conn.close()
return self.voidresp()
def abort(self):
# overridden as we can't pass MSG_OOB flag to sendall()
line = b'ABOR' + B_CRLF
self.sock.sendall(line)
resp = self.getmultiline()
if resp[:3] not in {'426', '225', '226'}:
raise error_proto(resp)
return resp
__all__.append('FTP_TLS')
all_errors = (Error, IOError, EOFError, ssl.SSLError)
_150_re = None
def parse150(resp):
'''Parse the '150' response for a RETR request.
Returns the expected transfer size or None; size is not guaranteed to
be present in the 150 message.
'''
if resp[:3] != '150':
raise error_reply(resp)
global _150_re
if _150_re is None:
import re
_150_re = re.compile(
"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
m = _150_re.match(resp)
if not m:
return None
s = m.group(1)
return int(s)
_227_re = None
def parse227(resp):
'''Parse the '227' response for a PASV request.
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '227':
raise error_reply(resp)
global _227_re
if _227_re is None:
import re
_227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)', re.ASCII)
m = _227_re.search(resp)
if not m:
raise error_proto(resp)
numbers = m.groups()
host = '.'.join(numbers[:4])
port = (int(numbers[4]) << 8) + int(numbers[5])
return host, port
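# Illustrative example (not in the original source): the last two numbers are
# the high and low bytes of the data port.
#     parse227('227 Entering Passive Mode (192,168,1,2,19,137)')
#     -> ('192.168.1.2', 5001)    # 19 * 256 + 137 == 5001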
def parse229(resp, peer):
'''Parse the '229' response for a EPSV request.
Raises error_proto if it does not contain '(|||port|)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '229':
raise error_reply(resp)
left = resp.find('(')
if left < 0: raise error_proto(resp)
right = resp.find(')', left + 1)
if right < 0:
raise error_proto(resp) # should contain '(|||port|)'
if resp[left + 1] != resp[right - 1]:
raise error_proto(resp)
parts = resp[left + 1:right].split(resp[left+1])
if len(parts) != 5:
raise error_proto(resp)
host = peer[0]
port = int(parts[3])
return host, port
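# Illustrative example (not in the original source): only the port is taken
# from the reply; the host is reused from the control connection's peer.
#     parse229('229 Entering Extended Passive Mode (|||6446|)', ('10.0.0.1', 21))
#     -> ('10.0.0.1', 6446)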
def parse257(resp):
'''Parse the '257' response for a MKD or PWD request.
This is a response to a MKD or PWD request: a directory name.
Returns the directoryname in the 257 reply.'''
if resp[:3] != '257':
raise error_reply(resp)
if resp[3:5] != ' "':
return '' # Not compliant to RFC 959, but UNIX ftpd does this
dirname = ''
i = 5
n = len(resp)
while i < n:
c = resp[i]
i = i+1
if c == '"':
if i >= n or resp[i] != '"':
break
i = i+1
dirname = dirname + c
return dirname
def print_line(line):
'''Default retrlines callback to print a line.'''
print(line)
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
'''Copy file from one FTP-instance to another.'''
if not targetname: targetname = sourcename
type = 'TYPE ' + type
source.voidcmd(type)
target.voidcmd(type)
sourcehost, sourceport = parse227(source.sendcmd('PASV'))
target.sendport(sourcehost, sourceport)
# RFC 959: the user must "listen" [...] BEFORE sending the
# transfer request.
# So: STOR before RETR, because here the target is a "user".
treply = target.sendcmd('STOR ' + targetname)
if treply[:3] not in {'125', '150'}: raise error_proto # RFC 959
sreply = source.sendcmd('RETR ' + sourcename)
if sreply[:3] not in {'125', '150'}: raise error_proto # RFC 959
source.voidresp()
target.voidresp()
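# Illustrative use (not in the original source): copy a file directly between
# two servers; host names and credentials are placeholders.
#     ftpcp(FTP('src.example.com', 'user', 'pw'), 'data.bin',
#           FTP('dst.example.com', 'user', 'pw'))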
class Netrc:
"""Class to parse & provide access to 'netrc' format files.
See the netrc(4) man page for information on the file format.
WARNING: This class is obsolete -- use module netrc instead.
"""
__defuser = None
__defpasswd = None
__defacct = None
def __init__(self, filename=None):
if filename is None:
if "HOME" in os.environ:
filename = os.path.join(os.environ["HOME"],
".netrc")
else:
raise IOError("specify file to load or set $HOME")
self.__hosts = {}
self.__macros = {}
fp = open(filename, "r")
in_macro = 0
while 1:
line = fp.readline()
if not line: break
if in_macro and line.strip():
macro_lines.append(line)
continue
elif in_macro:
self.__macros[macro_name] = tuple(macro_lines)
in_macro = 0
words = line.split()
host = user = passwd = acct = None
default = 0
i = 0
while i < len(words):
w1 = words[i]
if i+1 < len(words):
w2 = words[i + 1]
else:
w2 = None
if w1 == 'default':
default = 1
elif w1 == 'machine' and w2:
host = w2.lower()
i = i + 1
elif w1 == 'login' and w2:
user = w2
i = i + 1
elif w1 == 'password' and w2:
passwd = w2
i = i + 1
elif w1 == 'account' and w2:
acct = w2
i = i + 1
elif w1 == 'macdef' and w2:
macro_name = w2
macro_lines = []
in_macro = 1
break
i = i + 1
if default:
self.__defuser = user or self.__defuser
self.__defpasswd = passwd or self.__defpasswd
self.__defacct = acct or self.__defacct
if host:
if host in self.__hosts:
ouser, opasswd, oacct = \
self.__hosts[host]
user = user or ouser
passwd = passwd or opasswd
acct = acct or oacct
self.__hosts[host] = user, passwd, acct
fp.close()
def get_hosts(self):
"""Return a list of hosts mentioned in the .netrc file."""
return self.__hosts.keys()
def get_account(self, host):
"""Returns login information for the named host.
The return value is a triple containing userid,
password, and the accounting field.
"""
host = host.lower()
user = passwd = acct = None
if host in self.__hosts:
user, passwd, acct = self.__hosts[host]
user = user or self.__defuser
passwd = passwd or self.__defpasswd
acct = acct or self.__defacct
return user, passwd, acct
def get_macros(self):
"""Return a list of all defined macro names."""
return self.__macros.keys()
def get_macro(self, macro):
"""Return a sequence of lines which define a named macro."""
return self.__macros[macro]
def test():
'''Test program.
Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-d dir
-l list
-p password
'''
if len(sys.argv) < 2:
print(test.__doc__)
sys.exit(0)
debugging = 0
rcfile = None
while sys.argv[1] == '-d':
debugging = debugging+1
del sys.argv[1]
if sys.argv[1][:2] == '-r':
# get name of alternate ~/.netrc file:
rcfile = sys.argv[1][2:]
del sys.argv[1]
host = sys.argv[1]
ftp = FTP(host)
ftp.set_debuglevel(debugging)
userid = passwd = acct = ''
try:
netrc = Netrc(rcfile)
except IOError:
if rcfile is not None:
sys.stderr.write("Could not open account file"
" -- using anonymous login.")
else:
try:
userid, passwd, acct = netrc.get_account(host)
except KeyError:
# no account for host
sys.stderr.write(
"No account -- using anonymous login.")
ftp.login(userid, passwd, acct)
for file in sys.argv[2:]:
if file[:2] == '-l':
ftp.dir(file[2:])
elif file[:2] == '-d':
cmd = 'CWD'
if file[2:]: cmd = cmd + ' ' + file[2:]
resp = ftp.sendcmd(cmd)
elif file == '-p':
ftp.set_pasv(not ftp.passiveserver)
else:
ftp.retrbinary('RETR ' + file, \
sys.stdout.write, 1024)
ftp.quit()
if __name__ == '__main__':
test()
import pandas as pd
import numpy as np
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, PANDAS_GT_100
from dask.dataframe.indexing import _coerce_loc_index
from dask.dataframe.utils import assert_eq, make_meta, PANDAS_VERSION
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
def test_loc():
assert d.loc[3:8].divisions[0] == 3
assert d.loc[3:8].divisions[-1] == 8
assert d.loc[5].divisions == (5, 5)
assert_eq(d.loc[5], full.loc[5:5])
assert_eq(d.loc[3:8], full.loc[3:8])
assert_eq(d.loc[:8], full.loc[:8])
assert_eq(d.loc[3:], full.loc[3:])
assert_eq(d.loc[[5]], full.loc[[5]])
expected_warning = FutureWarning
if not PANDAS_GT_100:
# removed in pandas 1.0
with pytest.warns(expected_warning):
assert_eq(d.loc[[3, 4, 1, 8]], full.loc[[3, 4, 1, 8]])
with pytest.warns(expected_warning):
assert_eq(d.loc[[3, 4, 1, 9]], full.loc[[3, 4, 1, 9]])
with pytest.warns(expected_warning):
assert_eq(d.loc[np.array([3, 4, 1, 9])], full.loc[np.array([3, 4, 1, 9])])
assert_eq(d.a.loc[5], full.a.loc[5:5])
assert_eq(d.a.loc[3:8], full.a.loc[3:8])
assert_eq(d.a.loc[:8], full.a.loc[:8])
assert_eq(d.a.loc[3:], full.a.loc[3:])
assert_eq(d.a.loc[[5]], full.a.loc[[5]])
if not PANDAS_GT_100:
# removed in pandas 1.0
with pytest.warns(expected_warning):
assert_eq(d.a.loc[[3, 4, 1, 8]], full.a.loc[[3, 4, 1, 8]])
with pytest.warns(expected_warning):
assert_eq(d.a.loc[[3, 4, 1, 9]], full.a.loc[[3, 4, 1, 9]])
with pytest.warns(expected_warning):
assert_eq(
d.a.loc[np.array([3, 4, 1, 9])], full.a.loc[np.array([3, 4, 1, 9])]
)
assert_eq(d.a.loc[[]], full.a.loc[[]])
assert_eq(d.a.loc[np.array([])], full.a.loc[np.array([])])
pytest.raises(KeyError, lambda: d.loc[1000])
assert_eq(d.loc[1000:], full.loc[1000:])
assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_non_informative_index():
df = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
ddf.divisions = (None,) * 3
assert not ddf.known_divisions
ddf.loc[20:30].compute(scheduler="sync")
assert_eq(ddf.loc[20:30], df.loc[20:30])
df = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
assert_eq(ddf.loc[20], df.loc[20:20])
def test_loc_with_text_dates():
A = dd._compat.makeTimeSeries().iloc[:5]
B = dd._compat.makeTimeSeries().iloc[5:]
s = dd.Series(
{("df", 0): A, ("df", 1): B},
"df",
A,
[A.index.min(), B.index.min(), B.index.max()],
)
assert s.loc["2000":"2010"].divisions == s.divisions
assert_eq(s.loc["2000":"2010"], s)
assert len(s.loc["2000-01-03":"2000-01-05"].compute()) == 3
def test_loc_with_series():
assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_loc_with_array():
assert_eq(d.loc[(d.a % 2 == 0).values], full.loc[(full.a % 2 == 0).values])
assert sorted(d.loc[(d.a % 2).values].dask) == sorted(d.loc[(d.a % 2).values].dask)
assert sorted(d.loc[(d.a % 2).values].dask) != sorted(d.loc[(d.a % 3).values].dask)
def test_loc_with_function():
assert_eq(d.loc[lambda df: df["a"] > 3, :], full.loc[lambda df: df["a"] > 3, :])
def _col_loc_fun(_df):
return _df.columns.str.contains("b")
assert_eq(d.loc[:, _col_loc_fun], full.loc[:, _col_loc_fun])
def test_loc_with_array_different_partition():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[(ddf.A > 0).values], df.loc[(df.A > 0).values])
with pytest.raises(ValueError):
ddf.loc[(ddf.A > 0).repartition(["a", "g", "k", "o", "t"]).values]
def test_loc_with_series_different_partition():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])
assert_eq(
ddf.loc[(ddf.A > 0).repartition(["a", "g", "k", "o", "t"])], df.loc[df.A > 0]
)
def test_loc2d():
# index indexer is always regarded as slice for duplicated values
assert_eq(d.loc[5, "a"], full.loc[5:5, "a"])
# assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])
assert_eq(d.loc[5, ["a"]], full.loc[5:5, ["a"]])
# assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])
assert_eq(d.loc[3:8, "a"], full.loc[3:8, "a"])
assert_eq(d.loc[:8, "a"], full.loc[:8, "a"])
assert_eq(d.loc[3:, "a"], full.loc[3:, "a"])
assert_eq(d.loc[[8], "a"], full.loc[[8], "a"])
assert_eq(d.loc[3:8, ["a"]], full.loc[3:8, ["a"]])
assert_eq(d.loc[:8, ["a"]], full.loc[:8, ["a"]])
assert_eq(d.loc[3:, ["a"]], full.loc[3:, ["a"]])
# 3d
with pytest.raises(pd.core.indexing.IndexingError):
d.loc[3, 3, 3]
# Series should raise
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3:, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[d.a % 2 == 0, 3]
@pytest.mark.skipif(PANDAS_GT_100, reason="Removed in pandas 1.0")
def test_loc2d_some_missing():
with pytest.warns(FutureWarning):
assert_eq(d.loc[[3, 4, 3], ["a"]], full.loc[[3, 4, 3], ["a"]])
def test_loc2d_with_known_divisions():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
assert_eq(ddf.loc[["n"], ["A"]], df.loc[["n"], ["A"]])
assert_eq(ddf.loc[["a", "c", "n"], ["A"]], df.loc[["a", "c", "n"], ["A"]])
assert_eq(ddf.loc[["t", "b"], ["A"]], df.loc[["t", "b"], ["A"]])
assert_eq(
ddf.loc[["r", "r", "c", "g", "h"], ["A"]],
df.loc[["r", "r", "c", "g", "h"], ["A"]],
)
def test_loc2d_with_unknown_divisions():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
ddf.divisions = (None,) * len(ddf.divisions)
assert ddf.known_divisions is False
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
def test_loc2d_duplicated_columns():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("AABCD"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["j", "B"], df.loc[["j"], "B"])
assert_eq(ddf.loc["j", ["B"]], df.loc[["j"], ["B"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
assert_eq(ddf.loc["j":"q", "B"], df.loc["j":"q", "B"])
assert_eq(ddf.loc["j":"q", ["B"]], df.loc["j":"q", ["B"]])
assert_eq(ddf.loc["a":"o", "B":"D"], df.loc["a":"o", "B":"D"])
assert_eq(ddf.loc["a":"o", "B":"D"], df.loc["a":"o", "B":"D"])
assert_eq(ddf.loc["j":"q", "B":"A"], df.loc["j":"q", "B":"A"])
assert_eq(ddf.loc["j":"q", "B":"A"], df.loc["j":"q", "B":"A"])
assert_eq(ddf.loc[ddf.B > 0, "B"], df.loc[df.B > 0, "B"])
assert_eq(ddf.loc[ddf.B > 0, ["A", "C"]], df.loc[df.B > 0, ["A", "C"]])
def test_getitem():
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
columns=list("ABC"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(ddf["A"], df["A"])
# check cache consistency
tm.assert_series_equal(ddf["A"]._meta, ddf._meta["A"])
assert_eq(ddf[["A", "B"]], df[["A", "B"]])
tm.assert_frame_equal(ddf[["A", "B"]]._meta, ddf._meta[["A", "B"]])
assert_eq(ddf[ddf.C], df[df.C])
tm.assert_series_equal(ddf.C._meta, ddf._meta.C)
assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
pytest.raises(KeyError, lambda: df["X"])
pytest.raises(KeyError, lambda: df[["A", "X"]])
pytest.raises(AttributeError, lambda: df.X)
# not str/unicode
df = pd.DataFrame(np.random.randn(10, 5))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf[0], df[0])
assert_eq(ddf[[1, 2]], df[[1, 2]])
pytest.raises(KeyError, lambda: df[8])
pytest.raises(KeyError, lambda: df[[1, 8]])
def test_getitem_slice():
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
index=list("abcdefghi"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf["a":"e"], df["a":"e"])
assert_eq(ddf["a":"b"], df["a":"b"])
assert_eq(ddf["f":], df["f":])
def test_getitem_integer_slice():
df = pd.DataFrame({"A": range(6)})
ddf = dd.from_pandas(df, 2)
# integer slicing is iloc based
with pytest.raises(NotImplementedError):
ddf[1:3]
df = pd.DataFrame({"A": range(6)}, index=[1.0, 2.0, 3.0, 5.0, 10.0, 11.0])
ddf = dd.from_pandas(df, 2)
# except for float dtype indexes
assert_eq(ddf[2:8], df[2:8])
assert_eq(ddf[2:], df[2:])
assert_eq(ddf[:8], df[:8])
def test_loc_on_numpy_datetimes():
df = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(np.datetime64, ["2014", "2015", "2016"]))
)
a = dd.from_pandas(df, 2)
a.divisions = list(map(np.datetime64, a.divisions))
assert_eq(a.loc["2014":"2015"], a.loc["2014":"2015"])
def test_loc_on_pandas_datetimes():
df = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(pd.Timestamp, ["2014", "2015", "2016"]))
)
a = dd.from_pandas(df, 2)
a.divisions = list(map(pd.Timestamp, a.divisions))
assert_eq(a.loc["2014":"2015"], a.loc["2014":"2015"])
def test_loc_datetime_no_freq():
# https://github.com/dask/dask/issues/2389
datetime_index = pd.date_range("2016-01-01", "2016-01-31", freq="12h")
datetime_index.freq = None # FORGET FREQUENCY
df = pd.DataFrame({"num": range(len(datetime_index))}, index=datetime_index)
ddf = dd.from_pandas(df, npartitions=1)
slice_ = slice("2016-01-03", "2016-01-05")
result = ddf.loc[slice_, :]
expected = df.loc[slice_, :]
assert_eq(result, expected)
def test_coerce_loc_index():
for t in [pd.Timestamp, np.datetime64]:
assert isinstance(_coerce_loc_index([t("2014")], "2014"), t)
def test_loc_timestamp_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df.loc["2011-01-02"], ddf.loc["2011-01-02"])
assert_eq(df.loc["2011-01-02":"2011-01-10"], ddf.loc["2011-01-02":"2011-01-10"])
# same reso, dask result is always DataFrame
assert_eq(
df.loc["2011-01-02 10:00"].to_frame().T,
ddf.loc["2011-01-02 10:00"],
**CHECK_FREQ
)
# series
assert_eq(df.A.loc["2011-01-02"], ddf.A.loc["2011-01-02"], **CHECK_FREQ)
assert_eq(
df.A.loc["2011-01-02":"2011-01-10"],
ddf.A.loc["2011-01-02":"2011-01-10"],
**CHECK_FREQ
)
# slice with timestamp (dask result must be DataFrame)
assert_eq(
df.loc[pd.Timestamp("2011-01-02")].to_frame().T,
ddf.loc[pd.Timestamp("2011-01-02")],
**CHECK_FREQ
)
assert_eq(
df.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
ddf.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
**CHECK_FREQ
)
assert_eq(
df.loc[pd.Timestamp("2011-01-02 10:00")].to_frame().T,
ddf.loc[pd.Timestamp("2011-01-02 10:00")],
**CHECK_FREQ
)
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df.loc["2011-01"], ddf.loc["2011-01"])
assert_eq(df.loc["2011"], ddf.loc["2011"])
assert_eq(df.loc["2011-01":"2012-05"], ddf.loc["2011-01":"2012-05"])
assert_eq(df.loc["2011":"2015"], ddf.loc["2011":"2015"])
# series
assert_eq(df.B.loc["2011-01"], ddf.B.loc["2011-01"])
assert_eq(df.B.loc["2011"], ddf.B.loc["2011"])
assert_eq(df.B.loc["2011-01":"2012-05"], ddf.B.loc["2011-01":"2012-05"])
assert_eq(df.B.loc["2011":"2015"], ddf.B.loc["2011":"2015"])
def test_getitem_timestamp_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df["2011-01-02"], ddf["2011-01-02"])
assert_eq(df["2011-01-02":"2011-01-10"], df["2011-01-02":"2011-01-10"])
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="D", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df["2011-01"], ddf["2011-01"])
assert_eq(df["2011"], ddf["2011"])
assert_eq(df["2011-01":"2012-05"], ddf["2011-01":"2012-05"])
assert_eq(df["2011":"2015"], ddf["2011":"2015"])
def test_loc_period_str():
# .loc with PeriodIndex doesn't support partial string indexing
# https://github.com/pydata/pandas/issues/13429
pass
def test_getitem_period_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df["2011-01-02"], ddf["2011-01-02"])
assert_eq(df["2011-01-02":"2011-01-10"], df["2011-01-02":"2011-01-10"])
# same reso, dask result is always DataFrame
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="D", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df["2011-01"], ddf["2011-01"])
assert_eq(df["2011"], ddf["2011"])
assert_eq(df["2011-01":"2012-05"], ddf["2011-01":"2012-05"])
assert_eq(df["2011":"2015"], ddf["2011":"2015"])
def test_to_series():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_series(), ddf.index.to_series())
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_series(), ddf.index.to_series())
def test_to_frame():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(), ddf.index.to_frame())
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(), ddf.index.to_frame())
@pytest.mark.skipif(PANDAS_VERSION < "0.24.0", reason="No renaming for index")
def test_to_frame_name():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(name="foo"), ddf.index.to_frame(name="foo"))
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(name="bar"), ddf.index.to_frame(name="bar"))
@pytest.mark.parametrize("indexer", [0, [0], [0, 1], [1, 0], [False, True, True]])
def test_iloc(indexer):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
result = ddf.iloc[:, indexer]
expected = df.iloc[:, indexer]
assert_eq(result, expected)
def test_iloc_series():
s = pd.Series([1, 2, 3])
ds = dd.from_pandas(s, 2)
with pytest.raises(AttributeError):
ds.iloc[:]
def test_iloc_raises():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
with pytest.raises(NotImplementedError):
ddf.iloc[[0, 1], :]
with pytest.raises(NotImplementedError):
ddf.iloc[[0, 1], [0, 1]]
with pytest.raises(ValueError):
ddf.iloc[[0, 1], [0, 1], [1, 2]]
with pytest.raises(IndexError):
ddf.iloc[:, [5, 6]]
def test_iloc_duplicate_columns():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
df.columns = ["A", "A", "C"]
ddf.columns = ["A", "A", "C"]
selection = ddf.iloc[:, 2]
# Check that `iloc` is called instead of getitem
assert any([key.startswith("iloc") for key in selection.dask.layers.keys()])
select_first = ddf.iloc[:, 1]
assert_eq(select_first, df.iloc[:, 1])
select_zeroth = ddf.iloc[:, 0]
assert_eq(select_zeroth, df.iloc[:, 0])
select_list_cols = ddf.iloc[:, [0, 2]]
assert_eq(select_list_cols, df.iloc[:, [0, 2]])
select_negative = ddf.iloc[:, -1:-3:-1]
assert_eq(select_negative, df.iloc[:, -1:-3:-1])
def test_iloc_dispatch_to_getitem():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
selection = ddf.iloc[:, 2]
assert all([not key.startswith("iloc") for key in selection.dask.layers.keys()])
assert any([key.startswith("getitem") for key in selection.dask.layers.keys()])
select_first = ddf.iloc[:, 1]
assert_eq(select_first, df.iloc[:, 1])
select_zeroth = ddf.iloc[:, 0]
assert_eq(select_zeroth, df.iloc[:, 0])
select_list_cols = ddf.iloc[:, [0, 2]]
assert_eq(select_list_cols, df.iloc[:, [0, 2]])
select_negative = ddf.iloc[:, -1:-3:-1]
assert_eq(select_negative, df.iloc[:, -1:-3:-1])
def test_iloc_out_of_order_selection():
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
ddf = ddf[["C", "A", "B"]]
a = ddf.iloc[:, 0]
b = ddf.iloc[:, 1]
c = ddf.iloc[:, 2]
assert a.name == "C"
assert b.name == "A"
assert c.name == "B"
a1, b1, c1 = dask.compute(a, b, c)
assert a1.name == "C"
assert b1.name == "A"
assert c1.name == "B"
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ScroogeGenTest(PantsRunIntegrationTest):
@classmethod
def hermetic(cls):
return True
def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
full_config = {
'GLOBAL': {
'pythonpath': ["%(buildroot)s/contrib/scrooge/src/python"],
'backend_packages': ["pants.backend.codegen", "pants.backend.jvm", "pants.contrib.scrooge"]
},
'scala': { 'version': '2.11' },
'gen.scrooge': {
'service_deps': {
'java': [
'3rdparty:slf4j-api',
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:finagle-thrift',
'3rdparty/jvm/com/twitter:scrooge-core',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:finagle-thrift',
'3rdparty/jvm/com/twitter:scrooge-core',
],
},
'service_exports': {
'java': [
'3rdparty:thrift-0.6.1',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:finagle-thrift',
'3rdparty/jvm/com/twitter:scrooge-core',
]
},
'structs_deps': {
'java': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
},
'structs_exports': {
'java': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
}
}
}
if config:
for scope, scoped_cfgs in config.items():
updated = full_config.get(scope, {})
updated.update(scoped_cfgs)
full_config[scope] = updated
return super(ScroogeGenTest, self).run_pants(command, full_config, stdin_data, extra_env,
**kwargs)
@staticmethod
def thrift_test_target(name):
return 'contrib/scrooge/tests/thrift/org/pantsbuild/contrib/scrooge/scrooge_gen:' + name
def test_good(self):
# scrooge_gen should pass with correct thrift files.
cmd = ['gen', self.thrift_test_target('good-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_exports_of_thrift(self):
# Compiling against a thrift service with strict_deps=True should work
# because the necessary transitive dependencies will be exported.
cmd = ['compile', 'contrib/scrooge/tests/scala/org/pantsbuild/contrib/scrooge/scrooge_gen']
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_namespace_map(self):
# scrooge_gen should pass with namespace_map specified
cmd = ['gen', self.thrift_test_target('namespace-map-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_default_java_namespace(self):
# scrooge_gen should pass with default_java_namespace specified
cmd = ['gen', self.thrift_test_target('default-java-namespace-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_include_paths(self):
# scrooge_gen should pass with include_paths specified
cmd = ['gen', self.thrift_test_target('include-paths-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
from bson import json_util
from flask import Blueprint, render_template, request
from flask import Response
from datetime import datetime
from app import mongo
from app import utils
import json
mod_api = Blueprint('api', __name__, url_prefix='/api')
@mod_api.route('/', methods=['GET'])
def index():
''' Renders the App index page.
:return:
'''
return render_template('mod_importer/index.html')
@mod_api.route('/search', methods=['POST'])
def search():
params = request.json
# Format date
if 'date' in params:
params['to_date'] = datetime.strptime(params['date'].split('---')[1], '%m-%d-%Y')
params['from_date'] = datetime.strptime(params['date'].split('---')[0], '%m-%d-%Y')
result = {}
result['stats'] = utils.get_stats(params)
result['monthly-stats'] = utils.get_monthly_incidents_stats(params)
result['quarterly-stats'] = utils.get_quarterly_incidents_stats(params)
result['rank-stats'] = utils.get_rank_stats(params)
result['incident-stats'] = utils.get_incidents_stats(params)
result['violence-types'] = utils.get_violence_types(params)
result['daily-stats'] = utils.get_incident_types_by_time(params)
result['top-3'] = utils.get_top_3_stats(params)
result['map-victims-count'] = utils.get_map_victims_count(params)
result['census'] = utils.get_census_info(params)
result['raw-incident-stats'] = utils.get_raw_incidents(params)
result['rank-download-stats'] = utils.get_download_stats(params)
resp = Response(
response=json_util.dumps(result),
mimetype='application/json')
return resp
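# Illustrative request body for POST /api/search (hypothetical values): only
# the 'date' key is interpreted directly above, using the
# 'MM-DD-YYYY---MM-DD-YYYY' format; any other filter keys are passed through
# unchanged to the utils helpers.
#
#     {"date": "01-01-2014---12-31-2014"}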
@mod_api.route('/get_total_victims_number/<type>/<date>/<violence_type>/<name>', methods=['GET'])
def get_victims(type, date=None, violence_type=None, name=None):
" Get incidents number based on given params."
if violence_type:
violence_type = violence_type.replace('-', '/')
if date:
from_date = datetime.strptime(date.split('---')[0], '%m-%d-%Y')
to_date = datetime.strptime(date.split('---')[1], '%m-%d-%Y')
match = None
group = None
if name != 'Bangladesh':
match = {
"$match": {
type: {
"$nin": [
""
],
"$in": [
name
]
},
'violence_type': {
"$in": [
str(violence_type)
]
},
"incident_date": {"$gte": from_date, "$lte": to_date}
}
}
else:
match = {
"$match": {
type: {
"$nin": [
""
]
},
"incident_date": {"$gte": from_date, "$lte": to_date}
}
}
if type == 'division':
group = {
"$group": {
"_id": {
'division': '$district'
},
"incidents": {
"$sum": 1
}
}
}
else:
group = {
"$group": {
"_id": {
type: '$' + type
},
"incidents": {
"$sum": 1
}
}
}
sort = {
"$sort": {
"incidents": -1
}
}
project = {
"$project": {
"_id": 0,
type: "$_id." + type,
"incidents": "$incidents"
}
}
aggregation = [match, group, sort, project]
result = mongo.db.mgr.aggregate(aggregation)
resp = Response(
response=json_util.dumps(result['result']),
mimetype='application/json')
return resp
@mod_api.route('/<dataset>/get/violence-types', methods=['GET', 'POST'])
def get_violence_types(dataset):
"Get all the violence types based on the given dataset."
violence_types = mongo.db[dataset].distinct('violence_type')
resp = Response(
response=json_util.dumps(violence_types),
mimetype='application/json')
return resp
@mod_api.route('/census/<name>/<int:level>', methods=['GET', 'POST'])
def get_census_info(name, level):
"Get census info based on the given Division, District, Upazila."
census_info = None
if level == 0:
census_info = mongo.db.census.find_one({"division": name})
elif level == 1:
census_info = mongo.db.census.find_one({"district": name})
elif level == 2:
census_info = mongo.db.census.find_one({"upazila": name})
resp = Response(
response=json_util.dumps(census_info),
mimetype='application/json')
return resp
#!/usr/bin/env python
# @file: toolset/github_actions/github_actions_diff.py
# @author: Nate Brady
#
# @description: This script is only for use within Github Actions. It is meant
# to look through the commit history and determine whether or not the current
# framework test directory needs to be run. It compares the state of the PR
# branch against the target branch.
#
# Any changes found in the toolset/* directory other than continuous/*,
# github_actions/* and scaffolding/* will cause all tests to be run.
#
# The following commands can be put in commit messages to affect which tests
# will run:
#
# [ci skip] - Provided by Travis. Travis won't trigger any builds.
# [ci run-all] - This will force all tests to run.
# [ci fw-only Java/gemini JavaScript/nodejs] - Ensures that only Java/gemini and
# JavaScript/nodejs tests are run despite the detected changes.
# [ci fw Java/gemini] - Forces Java/gemini to run in addition to detected changes.
# [ci lang-only Java C++] - Ensures that only Java and C++ run despite detected changes.
# [ci lang Java C++] - Forces Java and C++ tests to run in addition to detected changes.
#
# If only a single test within a language group is forced to run, none of the
# other tests in that language group will run.
#
# The master branch will run the full suite of tests.
#
# IMPORTANT: the [ci *] commands must be added to every commit message. We do
# not look at previous commit messages. Make sure to keep your PR branch
# up-to-date with the target branch to avoid running unwanted tests.
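# Illustrative environment for a pull-request run (values are placeholders;
# these are the variables the script reads below):
#
#     PR_NUMBER=1234
#     TARGET_BRANCH_NAME=master
#     GITHUB_SHA=<sha of the PR merge commit>
#     PREVIOUS_COMMIT=<sha of the previous commit, used for non-PR runs>
#     COMMIT_MESSAGE="[ci fw Java/gemini] tweak config"
#     TESTDIR="Java/gemini JavaScript/nodejs"    # or TESTLANG="Java"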
import subprocess
import os
import re
def fw_found_in_changes(test, changes_output):
return re.search(
r"frameworks/" + re.escape(test) + "/",
changes_output, re.M)
# Clean up diff/grep output: drop empty lines and rejoin the rest into a single string
def clean_output(output):
return os.linesep.join([s for s in output.splitlines() if s])
def quit_diffing():
if len(run_tests):
print("github-actions-run-tests {!s}".format(" ".join(set(run_tests))))
else:
print("No tests to run.")
exit(0)
curr_branch = ""
is_PR = (os.getenv("PR_NUMBER") != "")
previous_commit = os.getenv("PREVIOUS_COMMIT")
diff_target = os.getenv("TARGET_BRANCH_NAME") if is_PR else previous_commit
if is_PR:
curr_branch = "HEAD"
# Also fetch master to compare against
subprocess.check_output(['bash', '-c', 'git fetch origin {0}:{0}'
.format(diff_target)])
else:
curr_branch = os.getenv("GITHUB_SHA")
# https://stackoverflow.com/questions/25071579/list-all-files-changed-in-a-pull-request-in-git-github
changes = clean_output(
subprocess.check_output([
'bash', '-c',
'git --no-pager diff --name-only {0} $(git merge-base {0} {1})'
.format(curr_branch, diff_target)
]))
print("Determining what to run based on the following file changes: \n{!s}"
.format('\n'.join(changes.split('\n')[0:10])))
if len(changes.split('\n')) > 10:
print("Too many files to show.")
# COMMIT MESSAGES:
# Before any complicated diffing, check for forced runs from the commit message
# The relevant commit message is provided via the COMMIT_MESSAGE environment variable
last_commit_msg = os.getenv("COMMIT_MESSAGE")
test_dirs = []
run_tests = []
# Break the test env variable down into test directories
if os.getenv("TESTLANG"):
dir = "frameworks/" + os.getenv("TESTLANG") + "/"
# materialize to a list because test_dirs is iterated more than once below
test_dirs = list(map(lambda x: os.getenv("TESTLANG") + "/" + x,
                     filter(lambda x: os.path.isdir(dir + x), os.listdir(dir))))
elif os.getenv("TESTDIR"):
test_dirs = os.getenv("TESTDIR").split(' ')
else:
def get_frameworks(test_lang):
dir = "frameworks/" + test_lang + "/"
return map(lambda x: test_lang + "/" + x,
filter(lambda x: os.path.isdir(dir + x),
os.listdir(dir)))
test_dirs = []
for frameworks in map(get_frameworks, os.listdir("frameworks")):
for framework in frameworks:
test_dirs.append(framework)
# Forced full run
if re.search(r'\[ci run-all\]', last_commit_msg, re.M):
print("All tests have been forced to run from the commit message.")
run_tests = test_dirs
quit_diffing()
# Forced *fw-only* specific tests
if re.search(r'\[ci fw-only .+\]', last_commit_msg, re.M):
tests = re.findall(r'\[ci fw-only (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in tests:
if test in test_dirs:
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# quit here because we're using "only"
quit_diffing()
# Forced *lang-only* specific tests
if re.search(r'\[ci lang-only .+\]', last_commit_msg, re.M):
langs = re.findall(r'\[ci lang-only (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in test_dirs:
for lang in langs:
if test.startswith(lang + "/"):
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# quit here because we're using "only"
quit_diffing()
# Forced framework run in addition to other tests
if re.search(r'\[ci fw .+\]', last_commit_msg, re.M):
tests = re.findall(r'\[ci fw (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in tests:
if test in test_dirs:
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# Forced lang run in addition to other running tests
if re.search(r'\[ci lang .+\]', last_commit_msg, re.M):
langs = re.findall(r'\[ci lang (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in test_dirs:
for lang in langs:
if test.startswith(lang + "/"):
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# Ignore travis, github_actions, continuous and scaffolding changes
if re.search(r'^toolset\/(?!(travis\/|github_actions\/|continuous\/|scaffolding\/))', changes, re.M) is not None:
print("Found changes to core toolset. Running all tests.")
run_tests = test_dirs
quit_diffing()
for test in test_dirs:
if fw_found_in_changes(test, changes):
print("Found changes that affect {!s}".format(test))
run_tests.append(test)
quit_diffing()
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^render_to_response/$', views.render_to_response_view),
url(r'^render_to_response/multiple_templates/$', views.render_to_response_view_with_multiple_templates),
url(r'^render_to_response/request_context/$', views.render_to_response_view_with_request_context),
url(r'^render_to_response/content_type/$', views.render_to_response_view_with_content_type),
url(r'^render_to_response/dirs/$', views.render_to_response_view_with_dirs),
url(r'^render_to_response/status/$', views.render_to_response_view_with_status),
url(r'^render_to_response/using/$', views.render_to_response_view_with_using),
url(r'^render_to_response/context_instance_misuse/$', views.render_to_response_with_context_instance_misuse),
url(r'^render/$', views.render_view),
url(r'^render/multiple_templates/$', views.render_view_with_multiple_templates),
url(r'^render/base_context/$', views.render_view_with_base_context),
url(r'^render/content_type/$', views.render_view_with_content_type),
url(r'^render/dirs/$', views.render_with_dirs),
url(r'^render/status/$', views.render_view_with_status),
url(r'^render/using/$', views.render_view_with_using),
url(r'^render/current_app/$', views.render_view_with_current_app),
url(r'^render/current_app_conflict/$', views.render_view_with_current_app_conflict),
]
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Show dll dependencies of executable files or other dynamic libraries.
"""
import glob
import argparse
import PyInstaller.depend.bindepend
from PyInstaller.compat import is_win
import PyInstaller.log
def run():
parser = argparse.ArgumentParser()
PyInstaller.log.__add_options(parser)
parser.add_argument('filenames', nargs='+',
metavar='executable-or-dynamic-library',
help=("executables or dynamic libraries for which "
"the dependencies should be shown"))
args = parser.parse_args()
PyInstaller.log.__process_options(parser, args)
# Suppress all informative messages from the dependency code.
PyInstaller.log.getLogger('PyInstaller.build.bindepend').setLevel(
PyInstaller.log.WARN)
try:
for a in args.filenames:
for fn in glob.glob(a):
imports = PyInstaller.depend.bindepend.getImports(fn)
if is_win:
assemblies = PyInstaller.depend.bindepend.getAssemblies(fn)
imports.update([a.getid() for a in assemblies])
print(fn, imports)
except KeyboardInterrupt:
raise SystemExit("Aborted by user request.")
if __name__ == '__main__':
run()
# ==============================================================================
# purpose: create statistical distribution objects, and find shortest paths
# author:
# created: 6/6/15
# revised:
# comments:
# 1. This is based on the object graph on Wikipedia.
# 2. The idea is to create a graph of the various continuous statistical distributions,
# plot them, and traverse the graph to find the shortest distance paths.
# 3. Could use the Python module objgraph to do this, where the nodes are the distributions
# but inheritance is the only relationship here, so cannot have the relationships that we need
# such as deterministic relationships, and approximate relationships.
# 4. How does an ontology fit into the picture?
#==============================================================================
import scipy.stats as sps
import math
class StatisticalDistribution(object):
def __init__(self):
pass
def compute_percentile(self, percentile):
pass
class NormalDistribution(StatisticalDistribution):
def __init__(self, mean=0, var=1):
self.mean = mean
self.var = var
def compute_percentile(self, percentile=0.5):
rv = sps.norm(loc=self.mean, scale=math.sqrt(self.var))
return rv.ppf(percentile)
class LogNormalDistribution(StatisticalDistribution):
def __init__(self, mean=0, var=1):
self.mean = mean
self.var = var
def compute_percentile(self, percentile=0.5):
rv = sps.lognorm(s=math.sqrt(self.var), scale=math.exp(self.mean))
return rv.ppf(percentile)
x = NormalDistribution(mean=3, var=4)
x.compute_percentile()
y = LogNormalDistribution(mean=3, var=4)
y.compute_percentile()
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class PodomaticIE(InfoExtractor):
IE_NAME = 'podomatic'
_VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
_TESTS = [
{
'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00',
'md5': '84bb855fcf3429e6bf72460e1eed782d',
'info_dict': {
'id': '2009-01-02T16_03_35-08_00',
'ext': 'mp3',
'uploader': 'Science Teaching Tips',
'uploader_id': 'scienceteachingtips',
'title': '64. When the Moon Hits Your Eye',
'duration': 446,
}
},
{
'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00',
'md5': 'd2cf443931b6148e27638650e2638297',
'info_dict': {
'id': '2013-11-15T16_31_21-08_00',
'ext': 'mp3',
'uploader': 'Ostbahnhof / Techno Mix',
'uploader_id': 'ostbahnhof',
'title': 'Einunddreizig',
'duration': 3799,
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
channel = mobj.group('channel')
json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' +
'?permalink=true&rtmp=0') %
(mobj.group('proto'), channel, video_id))
data_json = self._download_webpage(
json_url, video_id, 'Downloading video info')
data = json.loads(data_json)
video_url = data['downloadLink']
if not video_url:
video_url = '%s/%s' % (data['streamer'].replace('rtmp', 'http'), data['mediaLocation'])
uploader = data['podcast']
title = data['title']
thumbnail = data['imageLocation']
duration = int_or_none(data.get('length'), 1000)
return {
'id': video_id,
'url': video_url,
'title': title,
'uploader': uploader,
'uploader_id': channel,
'thumbnail': thumbnail,
'duration': duration,
}
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips, verify that it works.
from test_framework import BitcoinTestFramework
from util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def run_test (self):
BitcoinTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].setgenerate (True, 10);
self.nodes[2].setgenerate (True, 20);
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
# encoding: utf-8
"""Series-related objects."""
from __future__ import absolute_import, print_function, unicode_literals
from collections import Sequence
from ..dml.chtfmt import ChartFormat
from .marker import Marker
from ..oxml.ns import qn
from .point import BubblePoints, CategoryPoints, XyPoints
from ..util import lazyproperty
class _BaseSeries(object):
"""
Base class for |BarSeries| and other series classes.
"""
def __init__(self, ser):
super(_BaseSeries, self).__init__()
self._element = ser
self._ser = ser
@lazyproperty
def format(self):
"""
The |ChartFormat| instance for this series, providing access to shape
properties such as fill and line.
"""
return ChartFormat(self._ser)
@property
def index(self):
"""
The zero-based integer index of this series as reported in its
`c:ser/c:idx` element.
"""
return self._element.idx.val
@property
def name(self):
"""
The string label given to this series, appears as the title of the
column for this series in the Excel worksheet. It also appears as the
label for this series in the legend.
"""
names = self._element.xpath('./c:tx//c:pt/c:v/text()')
name = names[0] if names else ''
return name
class _BaseCategorySeries(_BaseSeries):
"""
Base class for |BarSeries| and other category chart series classes.
"""
@lazyproperty
def points(self):
"""
The |CategoryPoints| object providing access to individual data
points in this series.
"""
return CategoryPoints(self._ser)
@property
def values(self):
"""
Read-only. A sequence containing the float values for this series, in
the order they appear on the chart.
"""
def iter_values():
val = self._element.val
if val is None:
return
for idx in range(val.ptCount_val):
yield val.pt_v(idx)
return tuple(iter_values())
class _MarkerMixin(object):
"""
Mixin class providing `.marker` property for line-type chart series. The
line-type charts are Line, XY, and Radar.
"""
@lazyproperty
def marker(self):
"""
The |Marker| instance for this series, providing access to data point
marker properties such as fill and line. Setting these properties
determines the appearance of markers for all points in this series
that are not overridden by settings at the point level.
"""
return Marker(self._ser)
class AreaSeries(_BaseCategorySeries):
"""
A data point series belonging to an area plot.
"""
class BarSeries(_BaseCategorySeries):
"""A data point series belonging to a bar plot."""
@property
def invert_if_negative(self):
"""
|True| if a point having a value less than zero should appear with a
fill different than those with a positive value. |False| if the fill
should be the same regardless of the bar's value. When |True|, a bar
with a solid fill appears with white fill; in a bar with gradient
fill, the direction of the gradient is reversed, e.g. dark -> light
instead of light -> dark. The term "invert" here should be understood
to mean "invert the *direction* of the *fill gradient*".
"""
invertIfNegative = self._element.invertIfNegative
if invertIfNegative is None:
return True
return invertIfNegative.val
@invert_if_negative.setter
def invert_if_negative(self, value):
invertIfNegative = self._element.get_or_add_invertIfNegative()
invertIfNegative.val = value
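# Illustrative usage sketch (not part of this module), assuming a .pptx file
# whose first slide's first shape is a bar chart:
#
#     from pptx import Presentation
#     chart = Presentation('report.pptx').slides[0].shapes[0].chart
#     bar_series = chart.plots[0].series[0]
#     bar_series.invert_if_negative = False  # negative bars keep the series fill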
class LineSeries(_BaseCategorySeries, _MarkerMixin):
"""
A data point series belonging to a line plot.
"""
@property
def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
"""
smooth = self._element.smooth
if smooth is None:
return True
return smooth.val
@smooth.setter
def smooth(self, value):
self._element.get_or_add_smooth().val = value
class PieSeries(_BaseCategorySeries):
"""
A data point series belonging to a pie plot.
"""
class RadarSeries(_BaseCategorySeries, _MarkerMixin):
"""
A data point series belonging to a radar plot.
"""
class XySeries(_BaseSeries, _MarkerMixin):
"""
A data point series belonging to an XY (scatter) plot.
"""
def iter_values(self):
"""
Generate each float Y value in this series, in the order they appear
on the chart. A value of `None` represents a missing Y value
(corresponding to a blank Excel cell).
"""
yVal = self._element.yVal
if yVal is None:
return
for idx in range(yVal.ptCount_val):
yield yVal.pt_v(idx)
@lazyproperty
def points(self):
"""
The |XyPoints| object providing access to individual data points in
this series.
"""
return XyPoints(self._ser)
@property
def values(self):
"""
Read-only. A sequence containing the float values for this series, in
the order they appear on the chart.
"""
return tuple(self.iter_values())
class BubbleSeries(XySeries):
"""
A data point series belonging to a bubble plot.
"""
@lazyproperty
def points(self):
"""
The |BubblePoints| object providing access to individual data point
objects used to discover and adjust the formatting and data labels of
a data point.
"""
return BubblePoints(self._ser)
class SeriesCollection(Sequence):
"""
A sequence of |Series| objects.
"""
def __init__(self, parent_elm):
# *parent_elm* can be either a c:plotArea or xChart element
super(SeriesCollection, self).__init__()
self._element = parent_elm
def __getitem__(self, index):
ser = self._element.sers[index]
return _SeriesFactory(ser)
def __len__(self):
return len(self._element.sers)
def _SeriesFactory(ser):
"""
Return an instance of the appropriate subclass of _BaseSeries based on the
xChart element *ser* appears in.
"""
xChart_tag = ser.getparent().tag
try:
SeriesCls = {
qn('c:areaChart'): AreaSeries,
qn('c:barChart'): BarSeries,
qn('c:bubbleChart'): BubbleSeries,
qn('c:doughnutChart'): PieSeries,
qn('c:lineChart'): LineSeries,
qn('c:pieChart'): PieSeries,
qn('c:radarChart'): RadarSeries,
qn('c:scatterChart'): XySeries,
}[xChart_tag]
except KeyError:
raise NotImplementedError(
'series class for %s not yet implemented' % xChart_tag
)
return SeriesCls(ser)
# Copyright (c) 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from webob import exc
from jacket.api.compute.openstack.compute import block_device_mapping_v1 \
as block_device_mapping
from jacket.api.compute.openstack.compute import extension_info
from jacket.api.compute.openstack.compute.legacy_v2 import extensions
from jacket.api.compute.openstack.compute.legacy_v2 import servers as servers_v2
from jacket.api.compute.openstack.compute import servers as servers_v21
from jacket.compute.cloud import api as compute_api
from jacket.db import compute
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit.image import fake
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
validation_error = exception.ValidationError
def _setup_controller(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
'osapi_v21')
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
['os-block-device-mapping-v1',
'os-block-device-mapping'],
'osapi_v21')
self.no_volumes_controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', '', 'osapi_v21')
def setUp(self):
super(BlockDeviceMappingTestV21, self).setUp()
self._setup_controller()
fake.stub_out_image_service(self)
self.volume_id = fakes.FAKE_UUID
self.bdm = [{
'no_device': None,
'virtual_name': 'root',
'volume_id': self.volume_id,
'device_name': 'vda',
'delete_on_termination': False
}]
def _get_servers_body(self, no_image=False):
body = {
'server': {
'name': 'server_test',
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
if no_image:
del body['server']['imageRef']
return body
def _test_create(self, params, no_image=False, override_controller=None):
body = self._get_servers_body(no_image)
body['server'].update(params)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
if override_controller:
override_controller.create(req, body=body).obj['server']
else:
self.controller.create(req, body=body).obj['server']
def test_create_instance_with_volumes_enabled(self):
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
"""Test that the create works if there is no image supplied but
os-volumes extension is enabled and bdms are supplied
"""
self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
volume = {
'id': 1,
'status': 'active',
'volume_image_metadata':
{'test_key': 'test_value'}
}
compute_api.API._validate_bdm(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(True)
compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
self.bdm,
True).AndReturn(volume)
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.mox.ReplayAll()
self._test_create(params, no_image=True)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn(block_device_mapping, kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
@mock.patch('compute.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
bdm = [{
'volume_id': self.volume_id,
'device_name': 'vda'
}]
params = {'block_device_mapping': bdm}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_not_string(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
self.params = {'block_device_mapping': self.bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_snapshot_volume_id_none(self):
old_create = compute_api.API.create
bdm = [{
'no_device': None,
'snapshot_id': None,
'volume_id': None,
'device_name': 'vda',
'delete_on_termination': False
}]
self.params = {'block_device_mapping': bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_device_name_too_long(self):
self.bdm[0]['device_name'] = 'a' * 256
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_space_in_device_name(self):
self.bdm[0]['device_name'] = 'vd a'
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def _test_create_instance_with_size_error(self, size):
bdm = [{'delete_on_termination': True,
'device_name': 'vda',
'volume_size': size,
'volume_id': '11111111-1111-1111-1111-111111111111'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_invalid_size(self):
self._test_create_instance_with_size_error("hello world")
def test_create_instance_with_size_empty_string(self):
self._test_create_instance_with_size_error('')
def test_create_instance_with_size_zero(self):
self._test_create_instance_with_size_error("0")
def test_create_instance_with_size_greater_than_limit(self):
self._test_create_instance_with_size_error(compute.MAX_INT + 1)
def test_create_instance_with_bdm_delete_on_termination(self):
bdm = [{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'True'},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'invalid'},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
expected_bdm = [
{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_decide_format_legacy(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist',
['os-block-device-mapping',
'os-block-device-mapping-v1'],
'osapi_v21')
controller = servers_v21.ServersController(extension_info=ext_info)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
def test_create_instance_both_bdm_formats(self):
ext_info = extension_info.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', '', 'osapi_v21')
both_controllers = servers_v21.ServersController(
extension_info=ext_info)
bdm = [{'device_name': 'foo'}]
bdm_v2 = [{'source_type': 'volume',
'uuid': 'fake_vol'}]
params = {'block_device_mapping': bdm,
'block_device_mapping_v2': bdm_v2}
self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
override_controller=both_controllers)
class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
validation_error = exc.HTTPBadRequest
def _setup_controller(self):
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {'os-volumes': 'fake'}
self.controller = servers_v2.Controller(self.ext_mgr)
self.ext_mgr_no_vols = extensions.ExtensionManager()
self.ext_mgr_no_vols.extensions = {}
self.no_volumes_controller = servers_v2.Controller(
self.ext_mgr_no_vols)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
def test_create_instance_decide_format_legacy(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'}
controller = servers_v2.Controller(ext_mgr)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 1}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
def test_create_instance_with_size_empty_string(self):
# The check for an empty-string size exists in the V2.1 API only,
# so this test is skipped for the V2.0 API.
pass
def test_create_instance_with_size_zero(self):
# The check for a zero size exists in the V2.1 API only,
# so this test is skipped for the V2.0 API.
pass
def test_create_instance_with_size_greater_than_limit(self):
# The check for a size greater than the limit exists in the V2.1 API
# only, so this test is skipped for the V2.0 API.
pass
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
from openerp.addons.base_geolocalize.models.res_partner import geo_find, geo_query_address
from openerp.osv import osv
from openerp.osv import fields
class res_partner_grade(osv.osv):
_order = 'sequence'
_name = 'res.partner.grade'
_columns = {
'sequence': fields.integer('Sequence'),
'active': fields.boolean('Active'),
'name': fields.char('Level Name'),
'partner_weight': fields.integer('Level Weight',
help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
}
_defaults = {
'active': lambda *args: 1,
'partner_weight':1
}
class res_partner_activation(osv.osv):
_name = 'res.partner.activation'
_order = 'sequence'
_columns = {
'sequence' : fields.integer('Sequence'),
'name' : fields.char('Name', required=True),
}
class res_partner(osv.osv):
_inherit = "res.partner"
_columns = {
'partner_weight': fields.integer('Level Weight',
help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
'grade_id': fields.many2one('res.partner.grade', 'Level'),
'activation' : fields.many2one('res.partner.activation', 'Activation', select=1),
'date_partnership' : fields.date('Partnership Date'),
'date_review' : fields.date('Latest Partner Review'),
'date_review_next' : fields.date('Next Partner Review'),
# customer implementation
'assigned_partner_id': fields.many2one(
'res.partner', 'Implemented by',
),
'implemented_partner_ids': fields.one2many(
'res.partner', 'assigned_partner_id',
string='Implementation References',
),
}
_defaults = {
'partner_weight': lambda *args: 0
}
def onchange_grade_id(self, cr, uid, ids, grade_id, context=None):
res = {'value' :{'partner_weight':0}}
if grade_id:
partner_grade = self.pool.get('res.partner.grade').browse(cr, uid, grade_id)
res['value']['partner_weight'] = partner_grade.partner_weight
return res
class crm_lead(osv.osv):
_inherit = "crm.lead"
_columns = {
'partner_latitude': fields.float('Geo Latitude', digits=(16, 5)),
'partner_longitude': fields.float('Geo Longitude', digits=(16, 5)),
'partner_assigned_id': fields.many2one('res.partner', 'Assigned Partner', track_visibility='onchange', help="Partner this case has been forwarded/assigned to.", select=True),
'date_assign': fields.date('Assignation Date', help="Last date this case was forwarded/assigned to a partner"),
}
def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
fields += ['partner_latitude', 'partner_longitude', 'partner_assigned_id', 'date_assign']
return super(crm_lead, self)._merge_data(cr, uid, ids, oldest, fields, context=context)
def onchange_assign_id(self, cr, uid, ids, partner_assigned_id, context=None):
"""This function updates the "assignation date" automatically, when manually assign a partner in the geo assign tab
"""
if not partner_assigned_id:
return {'value':{'date_assign': False}}
else:
partners = self.pool.get('res.partner').browse(cr, uid, [partner_assigned_id], context=context)
user_id = partners[0] and partners[0].user_id.id or False
return {'value':
{'date_assign': fields.date.context_today(self,cr,uid,context=context),
'user_id' : user_id}
}
def action_assign_partner(self, cr, uid, ids, context=None):
return self.assign_partner(cr, uid, ids, partner_id=False, context=context)
def assign_partner(self, cr, uid, ids, partner_id=False, context=None):
partner_ids = {}
res = False
res_partner = self.pool.get('res.partner')
if not partner_id:
partner_ids = self.search_geo_partner(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner_ids.get(lead.id, False)
if not partner_id:
continue
self.assign_geo_localize(cr, uid, [lead.id], lead.partner_latitude, lead.partner_longitude, context=context)
partner = res_partner.browse(cr, uid, partner_id, context=context)
if partner.user_id:
salesteam_id = partner.team_id and partner.team_id.id or False
self.allocate_salesman(cr, uid, [lead.id], [partner.user_id.id], team_id=salesteam_id, context=context)
self.write(cr, uid, [lead.id], {'date_assign': fields.date.context_today(self,cr,uid,context=context), 'partner_assigned_id': partner_id}, context=context)
return res
def assign_geo_localize(self, cr, uid, ids, latitude=False, longitude=False, context=None):
if latitude and longitude:
self.write(cr, uid, ids, {
'partner_latitude': latitude,
'partner_longitude': longitude
}, context=context)
return True
# Don't pass context to browse()! We need country name in english below
for lead in self.browse(cr, uid, ids):
if lead.partner_latitude and lead.partner_longitude:
continue
if lead.country_id:
result = geo_find(geo_query_address(street=lead.street,
zip=lead.zip,
city=lead.city,
state=lead.state_id.name,
country=lead.country_id.name))
if result:
self.write(cr, uid, [lead.id], {
'partner_latitude': result[0],
'partner_longitude': result[1]
}, context=context)
return True
def search_geo_partner(self, cr, uid, ids, context=None):
res_partner = self.pool.get('res.partner')
res_partner_ids = {}
self.assign_geo_localize(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
partner_ids = []
if not lead.country_id:
continue
latitude = lead.partner_latitude
longitude = lead.partner_longitude
if latitude and longitude:
# 1. first way: in the same country, small area
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('partner_latitude', '>', latitude - 2), ('partner_latitude', '<', latitude + 2),
('partner_longitude', '>', longitude - 1.5), ('partner_longitude', '<', longitude + 1.5),
('country_id', '=', lead.country_id.id),
], context=context)
# 2. second way: in the same country, big area
if not partner_ids:
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('partner_latitude', '>', latitude - 4), ('partner_latitude', '<', latitude + 4),
('partner_longitude', '>', longitude - 3), ('partner_longitude', '<' , longitude + 3),
('country_id', '=', lead.country_id.id),
], context=context)
# 3. third way: in the same country, extra large area
if not partner_ids:
partner_ids = res_partner.search(cr, uid, [
('partner_weight','>', 0),
('partner_latitude','>', latitude - 8), ('partner_latitude','<', latitude + 8),
('partner_longitude','>', longitude - 8), ('partner_longitude','<', longitude + 8),
('country_id', '=', lead.country_id.id),
], context=context)
# 4. fourth way: anywhere in same country
if not partner_ids:
# still haven't found any, let's take all partners in the country!
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('country_id', '=', lead.country_id.id),
], context=context)
# 5. fifth way: closest partner whatsoever, just to have at least one result
if not partner_ids:
# warning: point() type takes (longitude, latitude) as parameters in this order!
cr.execute("""SELECT id, distance
FROM (select id, (point(partner_longitude, partner_latitude) <-> point(%s,%s)) AS distance FROM res_partner
WHERE active
AND partner_longitude is not null
AND partner_latitude is not null
AND partner_weight > 0) AS d
ORDER BY distance LIMIT 1""", (longitude, latitude))
res = cr.dictfetchone()
if res:
partner_ids.append(res['id'])
total_weight = 0
toassign = []
for partner in res_partner.browse(cr, uid, partner_ids, context=context):
total_weight += partner.partner_weight
toassign.append( (partner.id, total_weight) )
random.shuffle(toassign) # avoid always giving the leads to the first ones in db natural order!
nearest_weight = random.randint(0, total_weight)
for partner_id, weight in toassign:
if nearest_weight <= weight:
res_partner_ids[lead.id] = partner_id
break
return res_partner_ids
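# Illustrative sketch (not part of the original module): the block above picks a
# partner with probability proportional to partner_weight by accumulating weights
# and drawing a random threshold. A minimal standalone version of that idea, taking
# hypothetical (id, weight) pairs as input:
def _weighted_choice_sketch(weighted_ids):
    """weighted_ids: list of (id, weight) tuples; returns a chosen id or None."""
    cumulative = []
    total = 0
    for ident, weight in weighted_ids:
        total += weight
        cumulative.append((ident, total))
    if not total:
        return None
    # Draw a threshold and return the first id whose running total reaches it.
    threshold = random.randint(0, total)
    for ident, running_total in cumulative:
        if threshold <= running_total:
            return ident
    return None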
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers.tables \
import MembersTable
from openstack_dashboard.dashboards.project.loadbalancers.tables \
import MonitorsTable
from openstack_dashboard.dashboards.project.loadbalancers.tables \
import PoolsTable
class PoolsTab(tabs.TableTab):
table_classes = (PoolsTable,)
name = _("Pools")
slug = "pools"
template_name = "horizon/common/_detail_table.html"
def get_poolstable_data(self):
try:
pools = api.lbaas.pools_get(self.tab_group.request)
poolsFormatted = [p.readable(self.tab_group.request) for
p in pools]
except Exception:
poolsFormatted = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve pools list.'))
return poolsFormatted
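# Note added for clarity (not in the original file): horizon's TableTab looks up a
# get_<table-name>_data() method for every table listed in table_classes, which is
# why the data methods in these tabs are named get_poolstable_data,
# get_memberstable_data and get_monitorstable_data.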
class MembersTab(tabs.TableTab):
table_classes = (MembersTable,)
name = _("Members")
slug = "members"
template_name = "horizon/common/_detail_table.html"
def get_memberstable_data(self):
try:
members = api.lbaas.members_get(self.tab_group.request)
membersFormatted = [m.readable(self.tab_group.request) for
m in members]
except Exception:
membersFormatted = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve member list.'))
return membersFormatted
class MonitorsTab(tabs.TableTab):
table_classes = (MonitorsTable,)
name = _("Monitors")
slug = "monitors"
template_name = "horizon/common/_detail_table.html"
def get_monitorstable_data(self):
try:
monitors = api.lbaas.pool_health_monitors_get(
self.tab_group.request)
except Exception:
monitors = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve monitor list.'))
return monitors
class LoadBalancerTabs(tabs.TabGroup):
slug = "lbtabs"
tabs = (PoolsTab, MembersTab, MonitorsTab)
sticky = True
class PoolDetailsTab(tabs.Tab):
name = _("Pool Details")
slug = "pooldetails"
template_name = "project/loadbalancers/_pool_details.html"
def get_context_data(self, request):
pid = self.tab_group.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(request, pid)
except Exception:
pool = []
exceptions.handle(request,
_('Unable to retrieve pool details.'))
return {'pool': pool}
class VipDetailsTab(tabs.Tab):
name = _("VIP Details")
slug = "vipdetails"
template_name = "project/loadbalancers/_vip_details.html"
def get_context_data(self, request):
vid = self.tab_group.kwargs['vip_id']
try:
vip = api.lbaas.vip_get(request, vid)
except Exception:
vip = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve VIP details.'))
return {'vip': vip}
class MemberDetailsTab(tabs.Tab):
name = _("Member Details")
slug = "memberdetails"
template_name = "project/loadbalancers/_member_details.html"
def get_context_data(self, request):
mid = self.tab_group.kwargs['member_id']
try:
member = api.lbaas.member_get(request, mid)
except Exception:
member = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve member details.'))
return {'member': member}
class MonitorDetailsTab(tabs.Tab):
name = _("Monitor Details")
slug = "monitordetails"
template_name = "project/loadbalancers/_monitor_details.html"
def get_context_data(self, request):
mid = self.tab_group.kwargs['monitor_id']
try:
monitor = api.lbaas.pool_health_monitor_get(request, mid)
except Exception:
monitor = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve monitor details.'))
return {'monitor': monitor}
class PoolDetailsTabs(tabs.TabGroup):
slug = "pooltabs"
tabs = (PoolDetailsTab,)
class VipDetailsTabs(tabs.TabGroup):
slug = "viptabs"
tabs = (VipDetailsTab,)
class MemberDetailsTabs(tabs.TabGroup):
slug = "membertabs"
tabs = (MemberDetailsTab,)
class MonitorDetailsTabs(tabs.TabGroup):
slug = "monitortabs"
tabs = (MonitorDetailsTab,)
"""
#########################################################################
Author: Shalin Shah
Project: DNA Cloud
Graduate Mentor: Dixita Limbachya
Mentor: Prof. Manish K Gupta
Date: 28 July 2013
Website: www.guptalab.org/dnacloud
This module is used to create a barcode.
#########################################################################
"""
import sys
import barcode
from barcode import generate
from barcode.writer import ImageWriter
from PIL import PngImagePlugin
#if 'darwin' in sys.platform:
# from PIL import Image
# from PIL import ImageFont
# from PIL import ImageDraw
def generate(details, path):
EAN = barcode.get_barcode_class('code128')
ean = EAN(details, writer=ImageWriter())
barcodePic = ean.save(path + 'barcode')
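# Example usage (illustrative only; the argument values are hypothetical and the
# output filename assumes ImageWriter's default .png extension):
#
#     generate('sample-details-001', '/tmp/')   # writes /tmp/barcode.png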
"""
This is the code to generate a QR code; just add it to Main Frame.py and it is ready to go.
######################################################################
#This are the 2 functions which generate QR code
self.photo_max_size = 240
def onUseQrcode(self, text):
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_M, box_size=10)
qr.add_data(text)
qr.make(fit=True)
x = qr.make_image()
img_file = open(PATH + '/../icons/qr.jpg', 'wb')
x.save(img_file, 'JPEG')
img_file.close()
self.showQRCode(PATH + '/../icons/qr.jpg')
def showQRCode(self, filepath):
img = wx.Image(filepath, wx.BITMAP_TYPE_ANY)
# scale the image, preserving the aspect ratio
W = img.GetWidth()
H = img.GetHeight()
if W > H:
NewW = self.photo_max_size
NewH = self.photo_max_size * H / W
else:
NewH = self.photo_max_size
NewW = self.photo_max_size * W / H
img = img.Scale(NewW,NewH)
if self.pnl.IsShown():
self.pnl.imageCtrl.SetBitmap(wx.BitmapFromImage(img))
elif self.pnl1.IsShown():
self.pnl1.imageCtrl.SetBitmap(wx.BitmapFromImage(img))
self.Refresh()
###############################################################
"""
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
from itertools import izip
from django.db import connections, router, transaction, IntegrityError
from django.db.models.aggregates import Aggregate
from django.db.models.fields import DateField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import signals, sql
from django.utils.copycompat import deepcopy
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None):
self.model = model
# EmptyQuerySet instantiates QuerySet with model as None
self._db = using
self.query = query or sql.Query(self.model)
self._result_cache = None
self._iter = None
self._sticky_filter = False
self._for_write = False
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k,v in self.__dict__.items():
if k in ('_iter','_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs)), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
iter(self).next()
except StopIteration:
return False
return True
def __contains__(self, val):
# The 'in' operator works without this method, due to __iter__. This
# implementation exists only to shortcut the creation of Model
# instances, by bailing out early if we find a matching element.
pos = 0
if self._result_cache is not None:
if val in self._result_cache:
return True
elif self._iter is None:
# iterator is exhausted, so we have our answer
return False
# remember not to check these again:
pos = len(self._result_cache)
else:
# We need to start filling the result cache out. The following
# ensures that self._iter is not None and self._result_cache is not
# None
it = iter(self)
# Carry on, one result at a time.
while True:
if len(self._result_cache) <= pos:
self._fill_cache(num=1)
if self._iter is None:
# we ran out of items
return False
if self._result_cache[pos] == val:
return True
pos += 1
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist, e:
raise IndexError(e.args)
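# Illustrative behaviour of the slicing above (not part of the original module;
# `Entry` is a hypothetical model):
#
#     Entry.objects.all()[5:10]   # still lazy: returns a QuerySet with limits applied
#     Entry.objects.all()[::2]    # a step forces evaluation and returns a list
#     Entry.objects.all()[0]      # a single index forces evaluation of one row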
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
fill_cache = False
if connections[self.db].features.supports_select_related:
fill_cache = self.query.select_related
if isinstance(fill_cache, dict):
requested = fill_cache
else:
requested = None
max_depth = self.query.max_depth
extra_select = self.query.extra_select.keys()
aggregate_select = self.query.aggregate_select.keys()
only_load = self.query.get_loaded_field_names()
if not fill_cache:
fields = self.model._meta.fields
pk_idx = self.model._meta.pk_index()
index_start = len(extra_select)
aggregate_start = index_start + len(self.model._meta.fields)
load_fields = []
# If only/defer clauses have been specified,
# build the list of fields that are to be loaded.
if only_load:
for field, model in self.model._meta.get_fields_with_model():
if model is None:
model = self.model
if field == self.model._meta.pk:
# Record the index of the primary key when it is found
pk_idx = len(load_fields)
try:
if field.name in only_load[model]:
# Add a field that has been explicitly included
load_fields.append(field.name)
except KeyError:
# Model wasn't explicitly listed in the only_load table
# Therefore, we need to load all fields from this model
load_fields.append(field.name)
skip = None
if load_fields and not fill_cache:
# Some fields have been deferred, so we have to initialise
# via keyword arguments.
skip = set()
init_list = []
for field in fields:
if field.name not in load_fields:
skip.add(field.attname)
else:
init_list.append(field.attname)
model_cls = deferred_class_factory(self.model, skip)
# Cache db and model outside the loop
db = self.db
model = self.model
compiler = self.query.get_compiler(using=db)
for row in compiler.results_iter():
if fill_cache:
obj, _ = get_cached_row(model, row,
index_start, using=db, max_depth=max_depth,
requested=requested, offset=len(aggregate_select),
only_load=only_load)
else:
if skip:
row_data = row[index_start:aggregate_start]
pk_val = row_data[pk_idx]
obj = model_cls(**dict(zip(init_list, row_data), __entity_exists=True))
else:
# Omit aggregates in object creation.
obj = model(*row[index_start:aggregate_start], **{'__entity_exists': True})
# Store the source database of the object
obj._state.db = db
# This object came from the database; it's not being added.
obj._state.adding = False
if extra_select:
for i, k in enumerate(extra_select):
setattr(obj, k, row[i])
# Add the aggregates to the model
if aggregate_select:
for i, aggregate in enumerate(aggregate_select):
setattr(obj, aggregate, row[i+aggregate_start])
yield obj
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
for arg in args:
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=True)
return query.get_aggregation(using=self.db)
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist("%s matching query does not exist."
% self.model._meta.object_name)
raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
% (self.model._meta.object_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, \
'get_or_create() must be passed at least one keyword argument'
defaults = kwargs.pop('defaults', {})
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
try:
self._for_write = True
return self.get(**lookup), False
except self.model.DoesNotExist:
try:
params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
params.update(defaults)
obj = self.model(**params)
sid = transaction.savepoint(using=self.db)
obj.save(force_insert=True, using=self.db)
transaction.savepoint_commit(sid, using=self.db)
return obj, True
except IntegrityError, e:
transaction.savepoint_rollback(sid, using=self.db)
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
raise e
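# Illustrative usage (not part of the original module; `Author` is a hypothetical
# model with a unique `name` field):
#
#     obj, created = Author.objects.get_or_create(name='Alice',
#                                                 defaults={'age': 30})
#
# `defaults` is only used when a new row has to be inserted; the other keyword
# arguments form the lookup, and those without a '__' separator also become
# initial values for the new object.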
def latest(self, field_name=None):
"""
Returns the latest object, according to the model's 'get_latest_by'
option or optional given field_name.
"""
latest_by = field_name or self.model._meta.get_latest_by
assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.add_ordering('-%s' % latest_by)
return obj.get()
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
assert isinstance(id_list, (tuple, list, set, frozenset)), \
"in_bulk() must be provided with a list of IDs."
if not id_list:
return {}
qs = self._clone()
qs.query.add_filter(('pk__in', id_list))
qs.query.clear_ordering(force_empty=True)
return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
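# Illustrative usage (not part of the original module; `Entry` is hypothetical):
#
#     Entry.objects.in_bulk([1, 2, 3])   # -> {1: <Entry: 1>, 2: <Entry: 2>, 3: <Entry: 3>}
#
# IDs with no matching row are simply absent from the result, and an empty id_list
# returns {} without touching the database.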
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_related = False
del_query.query.clear_ordering()
collector = Collector(using=del_query.db)
collector.collect(del_query)
collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
if not transaction.is_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
rows = query.get_compiler(self.db).execute_sql(None)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(None)
_update.alters_data = True
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def values(self, *fields):
return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
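# Illustrative usage (not part of the original module; `Entry` is hypothetical):
#
#     Entry.objects.values_list('id', 'headline')   # iterates as (1, u'First'), (2, u'Second'), ...
#     Entry.objects.values_list('id', flat=True)    # iterates as 1, 2, ...
#
# flat=True is only allowed with a single field, as enforced above.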
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of datetime objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("month", "year", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self._clone(klass=DateQuerySet, setup=True,
_field_name=field_name, _kind=kind, _order=order)
def none(self):
"""
Returns an empty QuerySet.
"""
return self._clone(klass=EmptyQuerySet)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_related(self, *fields, **kwargs):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
"""
depth = kwargs.pop('depth', 0)
if kwargs:
raise TypeError('Unexpected keyword arguments to select_related: %s'
% (kwargs.keys(),))
obj = self._clone()
if fields:
if depth:
raise TypeError('Cannot pass both "depth" and fields to select_related()')
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
if depth:
obj.query.max_depth = depth
return obj
def dup_select_related(self, other):
"""
Copies the related selection status from the QuerySet 'other' to the
current QuerySet.
"""
self.query.select_related = other.query.select_related
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
for arg in args:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
kwargs[arg.default_alias] = arg
names = getattr(self, '_fields', None)
if names is None:
names = set(self.model._meta.get_all_field_names())
for aggregate in kwargs:
if aggregate in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % aggregate)
obj = self._clone()
obj._setup_aggregate_query(kwargs.keys())
# Add the aggregates to the query
for (alias, aggregate_expr) in kwargs.items():
obj.query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=False)
return obj
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering()
obj.query.add_ordering(*field_names)
return obj
def distinct(self, true_or_false=True):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
obj = self._clone()
obj.query.distinct = true_or_false
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
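# Illustrative usage (not part of the original module; `Entry` is hypothetical):
#
#     Entry.objects.defer('body').defer('headline')   # defers both fields cumulatively
#     Entry.objects.defer(None)                       # clears all deferrals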
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.model._meta.ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model)
return self._db or router.db_for_read(self.model)
###################
# PRIVATE METHODS #
###################
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
c = klass(model=self.model, query=query, using=self._db)
c._for_write = self._for_write
c.__dict__.update(kwargs)
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(self._iter.next())
except StopIteration:
self._iter = None
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes. By default
this does nothing, but see the ValuesQuerySet for an example of where
it's useful.
"""
pass
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
opts = self.model._meta
if self.query.group_by is None:
field_names = [f.attname for f in opts.fields]
self.query.add_fields(field_names, False)
self.query.set_group_by()
def _prepare(self):
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
obj = self.values("pk")
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
class ValuesQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
super(ValuesQuerySet, self).__init__(*args, **kwargs)
# select_related isn't supported in values(). (FIXME -#3358)
self.query.select_related = False
# QuerySet.clone() will also set up the _fields attribute with the
# names of the model fields to select.
def iterator(self):
# Purge any extra columns that haven't been explicitly asked for
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
for row in self.query.get_compiler(self.db).results_iter():
yield dict(zip(names, row))
def _setup_query(self):
"""
Constructs the field_names list that the values query will be
retrieving.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query.clear_select_fields()
if self._fields:
self.extra_names = []
self.aggregate_names = []
if not self.query.extra and not self.query.aggregates:
# Short cut - if there are no extra or aggregates, then
# the values() clause must be just field names.
self.field_names = list(self._fields)
else:
self.query.default_cols = False
self.field_names = []
for f in self._fields:
# we inspect the full extra_select list since we might
# be adding back an extra select item that we hadn't
# had selected previously.
if f in self.query.extra:
self.extra_names.append(f)
elif f in self.query.aggregate_select:
self.aggregate_names.append(f)
else:
self.field_names.append(f)
else:
# Default to all fields.
self.extra_names = None
self.field_names = [f.attname for f in self.model._meta.fields]
self.aggregate_names = None
self.query.select = []
if self.extra_names is not None:
self.query.set_extra_mask(self.extra_names)
self.query.add_fields(self.field_names, True)
if self.aggregate_names is not None:
self.query.set_aggregate_mask(self.aggregate_names)
def _clone(self, klass=None, setup=False, **kwargs):
"""
Cloning a ValuesQuerySet preserves the current fields.
"""
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
if not hasattr(c, '_fields'):
# Only clone self._fields if _fields wasn't passed into the cloning
# call directly.
c._fields = self._fields[:]
c.field_names = self.field_names
c.extra_names = self.extra_names
c.aggregate_names = self.aggregate_names
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _merge_sanity_check(self, other):
super(ValuesQuerySet, self)._merge_sanity_check(other)
if (set(self.extra_names) != set(other.extra_names) or
set(self.field_names) != set(other.field_names) or
self.aggregate_names != other.aggregate_names):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
self.query.set_group_by()
if self.aggregate_names is not None:
self.aggregate_names.extend(aggregates)
self.query.set_aggregate_mask(self.aggregate_names)
super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
"""
For ValueQuerySet (and subclasses like ValuesListQuerySet), they can
only be used as nested queries if they're already set up to select only
a single field (in which case, that is the field column that is
returned). This differs from QuerySet.as_sql(), where the column to
select is set up by Django.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
obj = self._clone()
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
"""
Validates that we aren't trying to do a query like
value__in=qs.values('value1', 'value2'), which isn't valid.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
return self
class ValuesListQuerySet(ValuesQuerySet):
def iterator(self):
if self.flat and len(self._fields) == 1:
for row in self.query.get_compiler(self.db).results_iter():
yield row[0]
elif not self.query.extra_select and not self.query.aggregate_select:
for row in self.query.get_compiler(self.db).results_iter():
yield tuple(row)
else:
# When extra(select=...) or an annotation is involved, the extra
# cols are always at the start of the row, and we need to reorder
# the fields to match the order in self._fields.
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
# If a field list has been specified, use it. Otherwise, use the
# full list of fields, including extras and aggregates.
if self._fields:
fields = list(self._fields) + filter(lambda f: f not in self._fields, aggregate_names)
else:
fields = names
for row in self.query.get_compiler(self.db).results_iter():
data = dict(zip(names, row))
yield tuple([data[f] for f in fields])
def _clone(self, *args, **kwargs):
clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
if not hasattr(clone, "flat"):
# Only assign flat if the clone didn't already get it from kwargs
clone.flat = self.flat
return clone
class DateQuerySet(QuerySet):
def iterator(self):
return self.query.get_compiler(self.db).results_iter()
def _setup_query(self):
"""
Sets up any special features of the query attribute.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query = self.query.clone(klass=sql.DateQuery, setup=True)
self.query.select = []
self.query.add_date_select(self._field_name, self._kind, self._order)
def _clone(self, klass=None, setup=False, **kwargs):
c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
c._field_name = self._field_name
c._kind = self._kind
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
class EmptyQuerySet(QuerySet):
def __init__(self, model=None, query=None, using=None):
super(EmptyQuerySet, self).__init__(model, query, using)
self._result_cache = []
def __and__(self, other):
return self._clone()
def __or__(self, other):
return other._clone()
def count(self):
return 0
def delete(self):
pass
def _clone(self, klass=None, setup=False, **kwargs):
c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs)
c._result_cache = []
return c
def iterator(self):
# This slightly odd construction is because we need an empty generator
# (it raises StopIteration immediately).
yield iter([]).next()
def all(self):
"""
Always returns EmptyQuerySet.
"""
return self
def filter(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def exclude(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def complex_filter(self, filter_obj):
"""
Always returns EmptyQuerySet.
"""
return self
def select_related(self, *fields, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def annotate(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def order_by(self, *field_names):
"""
Always returns EmptyQuerySet.
"""
return self
def distinct(self, true_or_false=True):
"""
Always returns EmptyQuerySet.
"""
return self
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Always returns EmptyQuerySet.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
return self
def reverse(self):
"""
Always returns EmptyQuerySet.
"""
return self
def defer(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def only(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def update(self, **kwargs):
"""
Don't update anything.
"""
return 0
# EmptyQuerySet is always an empty result in where-clauses (and similar
# situations).
value_annotation = False
def get_cached_row(klass, row, index_start, using, max_depth=0, cur_depth=0,
requested=None, offset=0, only_load=None, local_only=False):
"""
Helper function that recursively returns an object with the specified
related attributes already populated.
This method may be called recursively to populate deep select_related()
clauses.
Arguments:
* klass - the class to retrieve (and instantiate)
* row - the row of data returned by the database cursor
* index_start - the index of the row at which data for this
object is known to start
* using - the database alias on which the query is being executed.
* max_depth - the maximum depth to which a select_related()
relationship should be explored.
* cur_depth - the current depth in the select_related() tree.
Used in recursive calls to determine if we should dig deeper.
* requested - A dictionary describing the select_related() tree
that is to be retrieved. keys are field names; values are
dictionaries describing the keys on that related object that
are themselves to be select_related().
* offset - the number of additional fields that are known to
exist in `row` for `klass`. This usually means the number of
annotated results on `klass`.
* only_load - if the query has had only() or defer() applied,
this is the list of field names that will be returned. If None,
the full field list for `klass` can be assumed.
* local_only - Only populate local fields. This is used when following
reverse select-related relations.
"""
if max_depth and requested is None and cur_depth > max_depth:
# We've recursed deeply enough; stop now.
return None
restricted = requested is not None
if only_load:
load_fields = only_load.get(klass)
# When we create the object, we will also be populating all the
# parent classes, so traverse the parent classes looking for fields
# that must be included on load.
for parent in klass._meta.get_parent_list():
fields = only_load.get(parent)
if fields:
load_fields.update(fields)
else:
load_fields = None
if load_fields:
# Handle deferred fields.
skip = set()
init_list = []
# Build the list of fields that *haven't* been requested
for field, model in klass._meta.get_fields_with_model():
if field.name not in load_fields:
skip.add(field.name)
elif local_only and model is not None:
continue
else:
init_list.append(field.attname)
# Retrieve all the requested fields
field_count = len(init_list)
fields = row[index_start : index_start + field_count]
# If all the select_related columns are None, then the related
# object must be non-existent - set the relation to None.
# Otherwise, construct the related object.
if fields == (None,) * field_count:
obj = None
elif skip:
klass = deferred_class_factory(klass, skip)
obj = klass(__entity_exists=True, **dict(zip(init_list, fields)))
else:
obj = klass(*fields, **{'__entity_exists': True})
else:
# Load all fields on klass
if local_only:
field_names = [f.attname for f in klass._meta.local_fields]
else:
field_names = [f.attname for f in klass._meta.fields]
field_count = len(field_names)
fields = row[index_start : index_start + field_count]
# If all the select_related columns are None, then the related
# object must be non-existent - set the relation to None.
# Otherwise, construct the related object.
if fields == (None,) * field_count:
obj = None
else:
obj = klass(__entity_exists=True, **dict(zip(field_names, fields)))
# If an object was retrieved, set the database state.
if obj:
obj._state.db = using
obj._state.adding = False
index_end = index_start + field_count + offset
# Iterate over each related object, populating any
# select_related() fields
for f in klass._meta.fields:
if not select_related_descend(f, restricted, requested):
continue
if restricted:
next = requested[f.name]
else:
next = None
# Recursively retrieve the data for the related object
cached_row = get_cached_row(f.rel.to, row, index_end, using,
max_depth, cur_depth+1, next, only_load=only_load)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the base object exists, populate the
# descriptor cache
setattr(obj, f.get_cache_name(), rel_obj)
if f.unique and rel_obj is not None:
# If the field is unique, populate the
# reverse descriptor cache on the related object
setattr(rel_obj, f.related.get_cache_name(), obj)
# Now do the same, but for reverse related objects.
# Only handle the restricted case - i.e., don't do a depth
# descent into reverse relations unless explicitly requested
if restricted:
related_fields = [
(o.field, o.model)
for o in klass._meta.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
next = requested[f.related_query_name()]
# Recursively retrieve the data for the related object
cached_row = get_cached_row(model, row, index_end, using,
max_depth, cur_depth+1, next, only_load=only_load, local_only=True)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the field is unique, populate the
# reverse descriptor cache
setattr(obj, f.related.get_cache_name(), rel_obj)
if rel_obj is not None:
# If the related object exists, populate
# the descriptor cache.
setattr(rel_obj, f.get_cache_name(), obj)
# Now populate all the non-local field values
# on the related object
for rel_field,rel_model in rel_obj._meta.get_fields_with_model():
if rel_model is not None:
setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
# populate the field cache for any related object
# that has already been retrieved
if rel_field.rel:
try:
cached_obj = getattr(obj, rel_field.get_cache_name())
setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
except AttributeError:
# Related object hasn't been cached yet
pass
return obj, index_end
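# Illustrative sketch (not part of Django; model names are made up): the
# get_cached_row() logic above is what lets select_related() populate related
# objects from the same result row, so accessing them issues no extra query:
#
#     for book in Book.objects.using('default').select_related('author'):
#         print(book.author.name)   # filled from the cached row data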
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None):
self.raw_query = raw_query
self.model = model
self._db = using
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def __iter__(self):
# Mapping of attrnames to row column positions. Used for constructing
# the model using kwargs, needed when not all model's fields are present
# in the query.
model_init_field_names = {}
# A list of tuples of (column name, column position). Used for
# annotation fields.
annotation_fields = []
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
need_resolv_columns = hasattr(compiler, 'resolve_columns')
query = iter(self.query)
# Find out which columns are model's fields, and which ones should be
# annotated to the model.
for pos, column in enumerate(self.columns):
if column in self.model_fields:
model_init_field_names[self.model_fields[column].attname] = pos
else:
annotation_fields.append((column, pos))
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_field_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
# All model's fields are present in the query. So, it is possible
# to use *args based model instantiation. For each field of the model,
# record the query column position matching that field.
model_init_field_pos = []
for field in self.model._meta.fields:
model_init_field_pos.append(model_init_field_names[field.attname])
if need_resolv_columns:
fields = [self.model_fields.get(c, None) for c in self.columns]
# Begin looping through the query values.
for values in query:
if need_resolv_columns:
values = compiler.resolve_columns(values, fields)
# Associate fields to values
if skip:
model_init_kwargs = {}
for attname, pos in model_init_field_names.iteritems():
model_init_kwargs[attname] = values[pos]
instance = model_cls(**model_init_kwargs)
else:
model_init_args = [values[pos] for pos in model_init_field_pos]
instance = model_cls(*model_init_args)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
instance._state.db = db
instance._state.adding = False
yield instance
def __repr__(self):
return "" % (self.raw_query % self.params)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model)
def using(self, alias):
"""
Selects the database this RawQuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
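# A minimal usage sketch of RawQuerySet (illustrative only; the Author model
# and column names are assumptions). `translations` maps raw column names onto
# model field names before iteration, and any model fields missing from the
# query are deferred:
#
#     qs = RawQuerySet('SELECT id, full_name FROM myapp_author',
#                      model=Author,
#                      translations={'full_name': 'name'},
#                      using='default')
#     for author in qs:
#         print(author.name)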
def insert_query(model, values, return_id=False, raw_values=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented. It is not
part of the public API.
"""
query = sql.InsertQuery(model)
query.insert_values(values, raw_values)
return query.get_compiler(using=using).execute_sql(return_id)
#!/usr/bin/env python3
"""
Remote python server.
Execute Python commands remotely and send output back.
WARNING: This version has a gaping security hole -- it accepts requests
from any host on the Internet!
"""
import sys
from socket import socket, AF_INET, SOCK_STREAM
import io
import traceback
PORT = 4127
BUFSIZE = 1024
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = PORT
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
while True:
conn, (remotehost, remoteport) = s.accept()
print('connection from', remotehost, remoteport)
request = b''
while 1:
data = conn.recv(BUFSIZE)
if not data:
break
request += data
reply = execute(request.decode())
conn.send(reply.encode())
conn.close()
def execute(request):
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = sys.stderr = fakefile = io.StringIO()
try:
try:
exec(request, {}, {})
except:
print()
traceback.print_exc(100)
finally:
sys.stderr = stderr
sys.stdout = stdout
return fakefile.getvalue()
try:
main()
except KeyboardInterrupt:
pass
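# A minimal client sketch for the server above (an addition for illustration,
# not part of the original script). It sends a block of Python source, closes
# the write side so the server's recv() loop sees EOF, then reads back whatever
# the remote exec() printed.
def send_request(code, host='localhost', port=PORT):
    c = socket(AF_INET, SOCK_STREAM)
    c.connect((host, port))
    c.sendall(code.encode())
    c.shutdown(1)  # SHUT_WR: tells the server we are done sending
    reply = b''
    while True:
        data = c.recv(BUFSIZE)
        if not data:
            break
        reply += data
    c.close()
    return reply.decode()
# Example (with the server running): print(send_request("print('hello')"))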
from distutils.command.bdist import bdist as _bdist
from distutils.core import Command
import os
import shutil
import subprocess
import sys
from setuptools import setup
from setuptools.command.sdist import sdist as _sdist
from pyleus import __version__
from pyleus import BASE_JAR
JAVA_SRC_DIR = "topology_builder/"
BASE_JAR_SRC = os.path.join(JAVA_SRC_DIR, "dist", BASE_JAR)
BASE_JAR_DST = os.path.join("pyleus", BASE_JAR)
class build_java(Command):
description = "Build the topology base JAR"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def _make_jar(self):
subprocess.check_call(["make", "-C", JAVA_SRC_DIR])
def _copy_jar(self):
shutil.copy(BASE_JAR_SRC, BASE_JAR_DST)
def run(self):
self._make_jar()
self._copy_jar()
class bdist(_bdist):
sub_commands = [('build_java', None)]
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
_bdist.run(self)
class sdist(_sdist):
sub_commands = [('build_java', None)]
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
_sdist.run(self)
def readme():
with open("README.rst") as f:
return f.read()
extra_install_requires = []
if sys.version_info < (2, 7):
# argparse is in the standard library of Python >= 2.7
extra_install_requires.append("argparse")
setup(
name="pyleus",
version=__version__,
author="Patrick Lucas",
author_email="plucas@yelp.com",
description="Standard library and deployment tools for using Python "
"with Storm",
long_description=readme(),
url="http://pyleus.org",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Operating System :: OS Independent",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Distributed Computing",
"Development Status :: 4 - Beta",
],
packages=[
"pyleus", "pyleus.cli", "pyleus.cli.commands",
"pyleus.storm", "pyleus.storm.serializers"],
scripts=["scripts/pyleus"],
install_requires=[
"PyYAML",
"msgpack-python",
"virtualenv",
"six",
] + extra_install_requires,
package_data={'pyleus': [BASE_JAR]},
cmdclass={
'build_java': build_java,
'bdist': bdist,
'sdist': sdist,
},
)
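# Usage sketch: because 'build_java' is declared as a sub-command of both bdist
# and sdist above, building a distribution compiles the base JAR and copies it
# into the package before packaging, e.g.:
#
#     python setup.py sdist
#     python setup.py bdist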
#!/usr/bin/python
import time
import RPi.GPIO as GPIO
# remember to change the GPIO values below to match your sensors
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
def reading(sensor):
# Disable any warning message such as GPIO pins in use
GPIO.setwarnings(False)
# use the values of the GPIO pins, and not the actual pin number
# so if you connect to GPIO 25 which is on pin number 22, the
# reference in this code is 25, which is the number of the GPIO
# port and not the number of the physical pin
GPIO.setmode(GPIO.BCM)
if sensor == 0:
# point the software to the GPIO pins the sensor is using
# change these values to the pins you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
GPIO.setup(17,GPIO.OUT)
GPIO.setup(27,GPIO.IN)
GPIO.output(17, GPIO.LOW)
# found that the sensor can crash if there isn't a delay here
# no idea why. If you have odd crashing issues, increase delay
time.sleep(0.3)
# sensor manual says a pulse length of 10Us will trigger the
# sensor to transmit 8 cycles of ultrasonic burst at 40kHz and
# wait for the reflected ultrasonic burst to be received
# to get a pulse length of 10Us we need to start the pulse, then
# wait for 10 microseconds, then stop the pulse. This will
# result in the pulse length being 10Us.
# start the pulse on the GPIO pin
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(17, True)
# wait 10 micro seconds (this is 0.00001 seconds) so the pulse
# length is 10Us as the sensor expects
time.sleep(0.00001)
# stop the pulse after the time above has passed
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(17, False)
# listen to the input pin. 0 means nothing is happening. Once a
# signal is received the value will be 1 so the while loop
# stops and has the last recorded time the signal was 0
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 0:
signaloff = time.time()
# listen to the input pin. Once a signal is received, record the
# time the signal came through
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(27) == 1:
signalon = time.time()
# work out the difference in the two recorded times above to
# calculate the distance of an object in front of the sensor
timepassed = signalon - signaloff
# the elapsed time covers the round trip to the object and back, so
# multiply by the speed of sound (~34300 cm/s) and halve it:
# 34300 / 2 = 17000, giving the one-way distance in centimetres
distance = timepassed * 17000
# we're no longer using the GPIO, so tell the software we're done
GPIO.cleanup()
# return the distance of an object in front of the sensor in cm
return distance
else:
print "Incorrect usonic() function variable."
while 1:
print reading(0)
time.sleep(0.5)
#coding:utf-8
import os
import re
import commands
###################################
#
# Check whether the system log files have restrictive permissions
#
####################################
class TvarfileMod:
global results,sorce
results=[]
sorce=60
def setBaseline_main(self,baseline_main):
self.baseline_main=baseline_main
def start(self):
global results,sorce
print "[*] Checking TvarfileMod!!"
check_list=['/var/log/message','/var/log/secure','/var/log/maillog','/var/log/cron','/var/log/spooler','/var/log/boot.log']
try:
for item in check_list:
if os.path.exists(item):
test_com=commands.getoutput("ls -l "+item).split(" ")
if not test_com[0]=="-rw-r-----":
sorce=sorce-10
results.append({item:test_com[0]})
except:
pass
# print self.baseline_main.output_name
def save(self):
if sorce<60:
self.baseline_main.xml_result({"mod_id":"37wan-centOS-06","mod_name":"TvarfileMod","status":"1","results":str(results)})
else:
self.baseline_main.xml_result({"mod_id":"37wan-centOS-06","mod_name":"TvarfileMod","status":"0","results":"null"})
print "[*] TvarfileMod Finish!"
def getPluginClass():
return TvarfileMod
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertDictEqual({}, empty_dict)
self.assertEqual(__, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = { 'one': 'uno', 'two': 'dos' }
self.assertEqual(__, len(babel_fish))
def test_accessing_dictionaries(self):
babel_fish = { 'one': 'uno', 'two': 'dos' }
self.assertEqual(__, babel_fish['one'])
self.assertEqual(__, babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = { 'one': 'uno', 'two': 'dos' }
babel_fish['one'] = 'eins'
expected = { 'two': 'dos', 'one': __ }
self.assertDictEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = { 'one': 'uno', 'two': 'dos' }
dict2 = { 'two': 'dos', 'one': 'uno' }
self.assertEqual(__, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish.keys()))
self.assertEqual(__, len(babel_fish.values()))
self.assertEqual(__, 'one' in babel_fish.keys())
self.assertEqual(__, 'two' in babel_fish.values())
self.assertEqual(__, 'uno' in babel_fish.keys())
self.assertEqual(__, 'dos' in babel_fish.values())
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf', 'confused looking zebra'), 42)
self.assertEqual(__, len(cards))
self.assertEqual(__, cards['green elf'])
self.assertEqual(__, cards['yellow dwarf'])
# -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics. E.g.: if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
if not isinstance(src, unicode):
src = unicode(src, sys.getfilesystemencoding())
if not isinstance(dst, unicode):
dst = unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
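# A small self-contained sketch (an addition, not part of Werkzeug) of the
# POSIX rename semantics provided above: the destination file is silently
# replaced even though it already exists.
if __name__ == '__main__':
    import tempfile
    demo_dir = tempfile.mkdtemp()
    src_path = os.path.join(demo_dir, 'incoming.txt')
    dst_path = os.path.join(demo_dir, 'current.txt')
    with open(src_path, 'w') as fh:
        fh.write('new contents')
    with open(dst_path, 'w') as fh:
        fh.write('old contents')
    rename(src_path, dst_path)
    with open(dst_path) as fh:
        print(fh.read())  # -> 'new contents'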
'''
Copyright (c) <2012> Tarek Galal
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from .mechanisms.wauth import WAuth as AuthMechanism
from ..Common.constants import Constants
from ..Common.debugger import Debugger
class YowsupAuth:
def __init__(self, connection):
Debugger.attach(self)
self.connection = connection
self.mechanism = AuthMechanism
self.authenticated = False
self.username = None
self.password = None
self.domain = None
self.resource = None
self.supportsReceiptAcks = True
self.accountKind = None
self.expireData = None
self.authCallbacks = []
def isAuthenticated(self):
return self.authenticated
def onAuthenticated(self, callback):
self.authCallbacks.append(callback)
def authenticationComplete(self):
self.authenticated = True
#should process callbacks
def authenticationFailed(self):
self._d("Authentication failed!!")
def authenticate(self, username, password, domain, resource):
self._d("Connecting to %s" % Constants.host)
#connection = ConnectionEngine()
self.connection.connect((Constants.host, Constants.port));
self.mechanism = AuthMechanism(self.connection)
self.mechanism.setAuthObject(self)
self.username = username
self.password = password
self.domain = domain
self.resource = resource
self.jid = "%s@%s"%(self.username,self.domain)
connection = self.mechanism.login(username, password, domain, resource)
return connection
#!/usr/bin/env python2.7
from common_methods import exit_script, display_usage, exit_error
import sys, os
import config
import config_gen
def generate_confs():
"""
For each section, generate a config file from TEMPLATE_CONFIG (if present) into OUTPUT_CONFIG.
HOSTAPD is the exception, as it may have many variables that are not intended to be specified through TEMPLATE_CONFIG.
"""
global_config = config_gen.get_config()
for section in global_config.keys():
if global_config[section].has_key('TEMPLATE_CONFIG'):
if not global_config[section].has_key('OUTPUT_CONFIG'):
exit_error("[ERROR] 'OUTPUT_CONFIG' not specified for '" + section + "'")
template_file = global_config[section]['TEMPLATE_CONFIG']
template_str = ''
try:
with open(template_file) as f:
template_str = f.read()
except:
exit_error("[ERROR] Template File for '" + section + "', " + template_file + " does not exist")
for key, val in global_config[section].items():
template_str = template_str.replace('$' + key + '$', val)
try:
with open(global_config[section]['OUTPUT_CONFIG'], 'wb') as f:
print 'Writing', f.name, '...'
f.write(template_str)
except:
exit_error("[ERROR] Failed to open output_config '" + global_config[section]['OUTPUT_CONFIG'] + "' in write mode")
elif section == 'HOSTAPD':
write_hostapd_conf(global_config)
def write_hostapd_conf(global_config):
config_output = global_config['HOSTAPD']['OUTPUT_CONFIG']
print 'Writing', config_output, '...'
try:
with open(config_output, 'w') as f:
for key, val in global_config['HOSTAPD'].items():
if key not in config.special_options:
f.write( key + '=' + val + '\n' )
except:
exit_error('[ERROR] Failed to open ' + config_output + ' in write mode')
def test():
config_gen.init()
generate_confs()
if __name__ == '__main__':
test()
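# Tiny self-contained illustration (not part of the original script) of the
# '$KEY$' substitution that generate_confs() performs; the keys and values
# below are made up:
def _demo_substitution():
    template_str = "interface=$INTERFACE$\nssid=$SSID$\n"
    section = {'INTERFACE': 'wlan0', 'SSID': 'demo-network'}
    for key, val in section.items():
        template_str = template_str.replace('$' + key + '$', val)
    return template_str  # -> "interface=wlan0\nssid=demo-network\n"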
import os
import sys
from time import sleep
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.db import transaction
from denorm import denorms
PID_FILE = "/tmp/django-denorm-daemon-pid"
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('-n',
action='store_true',
dest='foreground',
default=False,
help='Run in foreground',
),
make_option('-i',
action='store',
type='int',
dest='interval',
default=1,
help='The interval - in seconds - between each update',
),
make_option('-f', '--pidfile',
action='store',
type='string',
dest='pidfile',
default=PID_FILE,
help='The pid file to use. Defaults to "%s".' % PID_FILE)
)
help = "Runs a daemon that checks for dirty fields and updates them in regular intervals."
def pid_exists(self, pidfile):
try:
pid = int(file(pidfile, 'r').read())
os.kill(pid, 0)
self.stderr.write(self.style.ERROR("daemon already running as pid: %s\n" % (pid,)))
return True
except OSError, err:
return err.errno == os.errno.EPERM
except IOError, err:
if err.errno == 2:
return False
else:
raise
@transaction.commit_manually
def handle_noargs(self, **options):
foreground = options['foreground']
interval = options['interval']
pidfile = options['pidfile']
if self.pid_exists(pidfile):
return
if not foreground:
from denorm import daemon
daemon.daemonize(noClose=True, pidfile=pidfile)
while True:
try:
denorms.flush()
sleep(interval)
transaction.commit()
except KeyboardInterrupt:
transaction.commit()
sys.exit()
# TestSwiftMetatype.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test the formatting of Swift metatypes
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftMetatype(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@swiftTest
def test_metatype(self):
"""Test the formatting of Swift metatypes"""
self.build()
target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
self, 'Set breakpoint here', lldb.SBFileSpec('main.swift'))
frame = thread.frames[0]
self.assertTrue(frame, "Frame 0 is valid.")
var_s = frame.FindVariable("s")
var_c = frame.FindVariable("c")
var_f = frame.FindVariable("f")
var_t = frame.FindVariable("t")
var_p = frame.FindVariable("p")
lldbutil.check_variable(self, var_s, False, "String")
lldbutil.check_variable(self, var_c, False, "a.D")
lldbutil.check_variable(self, var_f, False, "(Int) -> Int")
lldbutil.check_variable(self, var_t, False, "(Int, Int, String)")
lldbutil.check_variable(self, var_p, False, "a.P")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from .error import *
from .nodes import *
import datetime, sys, copyreg, types, base64
class RepresenterError(YAMLError):
pass
class BaseRepresenter:
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, str(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
@classmethod
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
@classmethod
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, bytes, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
def represent_str(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', data)
def represent_binary(self, data):
if hasattr(base64, 'encodebytes'):
data = base64.encodebytes(data).decode('ascii')
else:
data = base64.encodestring(data).decode('ascii')
return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
def represent_bool(self, data):
if data:
value = 'true'
else:
value = 'false'
return self.represent_scalar('tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar('tag:yaml.org,2002:int', str(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
else:
value = repr(data).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if '.' not in value and 'e' in value:
value = value.replace('e', '.0e', 1)
return self.represent_scalar('tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence('tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping('tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = data.isoformat()
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = data.isoformat(' ')
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes,
SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_complex(self, data):
if data.imag == 0.0:
data = '%r' % data.real
elif data.real == 0.0:
data = '%rj' % data.imag
elif data.imag > 0:
data = '%r+%rj' % (data.real, data.imag)
else:
data = '%r%rj' % (data.real, data.imag)
return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = '%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
def represent_module(self, data):
return self.represent_scalar(
'tag:yaml.org,2002:python/module:'+data.__name__, '')
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we call function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copyreg.dispatch_table:
reduce = copyreg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = 'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = 'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = '%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(object,
Representer.represent_object)
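# A short sketch of plugging a custom type into the representer machinery above
# via the public PyYAML entry points (shown as a comment to avoid a circular
# import here; the Point class is made up for illustration):
#
#     import yaml
#
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#     def point_representer(dumper, data):
#         return dumper.represent_mapping('!point', {'x': data.x, 'y': data.y})
#
#     yaml.add_representer(Point, point_representer)
#     print(yaml.dump(Point(1, 2)))  # -> !point {x: 1, y: 2}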
# -*- coding: utf-8 -*-
import time
import openerp
class m(openerp.osv.osv.Model):
""" This model exposes a few methods that will consume between 'almost no
resource' and 'a lot of resource'.
"""
_name = 'test.limits.model'
def consume_nothing(self, cr, uid, context=None):
return True
def consume_memory(self, cr, uid, size, context=None):
l = [0] * size
return True
def leak_memory(self, cr, uid, size, context=None):
if not hasattr(self, 'l'):
self.l = []
self.l.append([0] * size)
return True
def consume_time(self, cr, uid, seconds, context=None):
time.sleep(seconds)
return True
def consume_cpu_time(self, cr, uid, seconds, context=None):
t0 = time.clock()
t1 = time.clock()
while t1 - t0 < seconds:
for i in xrange(10000000):
x = i * i
t1 = time.clock()
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import random
import re
import shutil
import string
import sys
import tempfile
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import Executive, ScriptError
from .scm import AuthenticationError, SCM, commit_error_handler
_log = logging.getLogger(__name__)
# A mixin class that represents common functionality for SVN and Git-SVN.
class SVNRepository(object):
# FIXME: These belong in common.config.urls
svn_server_host = "svn.webkit.org"
svn_server_realm = " Mac OS Forge"
def has_authorization_for_realm(self, realm, home_directory=os.getenv("HOME")):
# If we are working on a file:// repository realm will be None
if realm is None:
return True
# ignore false positives for methods implemented in the mixee class. pylint: disable=E1101
# Assumes find and grep are installed.
if not os.path.isdir(os.path.join(home_directory, ".subversion")):
return False
find_args = ["find", ".subversion", "-type", "f", "-exec", "grep", "-q", realm, "{}", ";", "-print"]
find_output = self.run(find_args, cwd=home_directory, error_handler=Executive.ignore_error).rstrip()
if not find_output or not os.path.isfile(os.path.join(home_directory, find_output)):
return False
# Subversion either stores the password in the credential file, indicated by the presence of the key "password",
# or uses the system password store (e.g. Keychain on Mac OS X) as indicated by the presence of the key "passtype".
# We assume that these keys will not coincide with the actual credential data (e.g. that a person's username
# isn't "password") so that we can use grep.
if self.run(["grep", "password", find_output], cwd=home_directory, return_exit_code=True) == 0:
return True
return self.run(["grep", "passtype", find_output], cwd=home_directory, return_exit_code=True) == 0
class SVN(SCM, SVNRepository):
executable_name = "svn"
_svn_metadata_files = frozenset(['.svn', '_svn'])
def __init__(self, cwd, patch_directories, **kwargs):
SCM.__init__(self, cwd, **kwargs)
self._bogus_dir = None
if patch_directories == []:
raise Exception('Empty list of patch directories passed to SCM.__init__')
elif patch_directories == None:
self._patch_directories = [self._filesystem.relpath(cwd, self.checkout_root)]
else:
self._patch_directories = patch_directories
@classmethod
def in_working_directory(cls, path, executive=None):
if os.path.isdir(os.path.join(path, '.svn')):
# This is a fast shortcut for svn info that is usually correct for SVN < 1.7,
# but doesn't work for SVN >= 1.7.
return True
executive = executive or Executive()
svn_info_args = [cls.executable_name, 'info']
exit_code = executive.run_command(svn_info_args, cwd=path, return_exit_code=True)
return (exit_code == 0)
def find_uuid(self, path):
if not self.in_working_directory(path):
return None
return self.value_from_svn_info(path, 'Repository UUID')
@classmethod
def value_from_svn_info(cls, path, field_name):
svn_info_args = [cls.executable_name, 'info']
# FIXME: This method should use a passed in executive or be made an instance method and use self._executive.
info_output = Executive().run_command(svn_info_args, cwd=path).rstrip()
match = re.search("^%s: (?P.+)$" % field_name, info_output, re.MULTILINE)
if not match:
raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
return match.group('value').rstrip('\r')
def find_checkout_root(self, path):
uuid = self.find_uuid(path)
# If |path| is not in a working directory, we're supposed to return |path|.
if not uuid:
return path
# Search up the directory hierarchy until we find a different UUID.
last_path = None
while True:
if uuid != self.find_uuid(path):
return last_path
last_path = path
(path, last_component) = self._filesystem.split(path)
if last_path == path:
return None
@staticmethod
def commit_success_regexp():
return "^Committed revision (?P\d+)\.$"
def _run_svn(self, args, **kwargs):
return self.run([self.executable_name] + args, **kwargs)
@memoized
def svn_version(self):
return self._run_svn(['--version', '--quiet'])
def has_working_directory_changes(self):
# FIXME: What about files which are not committed yet?
return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) != ""
def discard_working_directory_changes(self):
# Make sure there are no locks lying around from a previously aborted svn invocation.
# This is slightly dangerous, as it's possible the user is running another svn process
# on this checkout at the same time. However, it's much more likely that we're running
# under windows and svn just sucks (or the user interrupted svn and it failed to clean up).
self._run_svn(["cleanup"], cwd=self.checkout_root)
# svn revert -R is not as awesome as git reset --hard.
# It will leave added files around, causing later svn update
# calls to fail on the bots. We make this mirror git reset --hard
# by deleting any added files as well.
added_files = reversed(sorted(self.added_files()))
# added_files() returns directories for SVN, we walk the files in reverse path
# length order so that we remove files before we try to remove the directories.
self._run_svn(["revert", "-R", "."], cwd=self.checkout_root)
for path in added_files:
# This is robust against cwd != self.checkout_root
absolute_path = self.absolute_path(path)
# Completely lame that there is no easy way to remove both types with one call.
if os.path.isdir(path):
os.rmdir(absolute_path)
else:
os.remove(absolute_path)
def status_command(self):
return [self.executable_name, 'status']
def _status_regexp(self, expected_types):
field_count = 6 if self.svn_version() > "1.6" else 5
return "^(?P[%s]).{%s} (?P.+)$" % (expected_types, field_count)
def _add_parent_directories(self, path):
"""Does 'svn add' to the path and its parents."""
if self.in_working_directory(path):
return
self.add(path)
def add_list(self, paths):
for path in paths:
self._add_parent_directories(os.path.dirname(os.path.abspath(path)))
if self.svn_version() >= "1.7":
# For subversion client 1.7 and later, need to add '--parents' option to ensure intermediate directories
# are added; in addition, 1.7 returns an exit code of 1 from svn add if one or more of the requested
# adds are already under version control, including intermediate directories subject to addition
# due to --parents
svn_add_args = ['svn', 'add', '--parents'] + paths
exit_code = self.run(svn_add_args, return_exit_code=True)
if exit_code and exit_code != 1:
raise ScriptError(script_args=svn_add_args, exit_code=exit_code)
else:
self._run_svn(["add"] + paths)
def _delete_parent_directories(self, path):
if not self.in_working_directory(path):
return
if set(os.listdir(path)) - self._svn_metadata_files:
return # Directory has non-trivial files in it.
self.delete(path)
def delete_list(self, paths):
for path in paths:
abs_path = os.path.abspath(path)
parent, base = os.path.split(abs_path)
result = self._run_svn(["delete", "--force", base], cwd=parent)
self._delete_parent_directories(os.path.dirname(abs_path))
return result
def exists(self, path):
return not self._run_svn(["info", path], return_exit_code=True, decode_output=False)
def changed_files(self, git_commit=None):
status_command = [self.executable_name, "status"]
status_command.extend(self._patch_directories)
# ACDMR: Added, Conflicted, Deleted, Modified or Replaced
return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
def changed_files_for_revision(self, revision):
# As far as I can tell svn diff --summarize output looks just like svn status output.
# No file contents printed, thus utf-8 auto-decoding in self.run is fine.
status_command = [self.executable_name, "diff", "--summarize", "-c", revision]
return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
def revisions_changing_file(self, path, limit=5):
revisions = []
# svn log will exit(1) (and thus self.run will raise) if the path does not exist.
log_command = ['log', '--quiet', '--limit=%s' % limit, path]
for line in self._run_svn(log_command, cwd=self.checkout_root).splitlines():
match = re.search('^r(?P<revision>\d+) ', line)
if not match:
continue
revisions.append(int(match.group('revision')))
return revisions
def conflicted_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("C"))
def added_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
def deleted_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
@staticmethod
def supports_local_commits():
return False
def display_name(self):
return "svn"
def svn_revision(self, path):
return self.value_from_svn_info(path, 'Revision')
def timestamp_of_revision(self, path, revision):
# We use --xml to get timestamps like 2013-02-08T08:18:04.964409Z
repository_root = self.value_from_svn_info(self.checkout_root, 'Repository Root')
info_output = Executive().run_command([self.executable_name, 'log', '-r', revision, '--xml', repository_root], cwd=path).rstrip()
match = re.search(r"^(?P.+)\r?$", info_output, re.MULTILINE)
return match.group('value')
# FIXME: This method should be on Checkout.
def create_patch(self, git_commit=None, changed_files=None):
"""Returns a byte array (str()) representing the patch file.
Patch files are effectively binary since they may contain
files of multiple different encodings."""
if changed_files == []:
return ""
elif changed_files == None:
changed_files = []
return self.run([self.script_path("svn-create-patch")] + changed_files,
cwd=self.checkout_root, return_stderr=False,
decode_output=False)
def committer_email_for_revision(self, revision):
return self._run_svn(["propget", "svn:author", "--revprop", "-r", revision]).rstrip()
def contents_at_revision(self, path, revision):
"""Returns a byte array (str()) containing the contents
of path @ revision in the repository."""
remote_path = "%s/%s" % (self._repository_url(), path)
return self._run_svn(["cat", "-r", revision, remote_path], decode_output=False)
def diff_for_revision(self, revision):
# FIXME: This should probably use cwd=self.checkout_root
return self._run_svn(['diff', '-c', revision])
def _bogus_dir_name(self):
rnd = ''.join(random.sample(string.ascii_letters, 5))
if sys.platform.startswith("win"):
parent_dir = tempfile.gettempdir()
else:
parent_dir = sys.path[0] # tempdir is not secure.
return os.path.join(parent_dir, "temp_svn_config_" + rnd)
def _setup_bogus_dir(self, log):
self._bogus_dir = self._bogus_dir_name()
if not os.path.exists(self._bogus_dir):
os.mkdir(self._bogus_dir)
self._delete_bogus_dir = True
else:
self._delete_bogus_dir = False
if log:
log.debug(' Html: temp config dir: "%s".', self._bogus_dir)
def _teardown_bogus_dir(self, log):
if self._delete_bogus_dir:
shutil.rmtree(self._bogus_dir, True)
if log:
log.debug(' Html: removed temp config dir: "%s".', self._bogus_dir)
self._bogus_dir = None
def diff_for_file(self, path, log=None):
self._setup_bogus_dir(log)
try:
args = ['diff']
if self._bogus_dir:
args += ['--config-dir', self._bogus_dir]
args.append(path)
return self._run_svn(args, cwd=self.checkout_root)
finally:
self._teardown_bogus_dir(log)
def show_head(self, path):
return self._run_svn(['cat', '-r', 'BASE', path], decode_output=False)
def _repository_url(self):
return self.value_from_svn_info(self.checkout_root, 'URL')
def apply_reverse_diff(self, revision):
# '-c -revision' applies the inverse diff of 'revision'
svn_merge_args = ['merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()]
_log.warning("svn merge has been known to take more than 10 minutes to complete. It is recommended you use git for rollouts.")
_log.debug("Running 'svn %s'" % " ".join(svn_merge_args))
# FIXME: Should this use cwd=self.checkout_root?
self._run_svn(svn_merge_args)
def revert_files(self, file_paths):
# FIXME: This should probably use cwd=self.checkout_root.
self._run_svn(['revert'] + file_paths)
def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
# git-commit and force are not used by SVN.
svn_commit_args = ["commit"]
if not username and not self.has_authorization_for_realm(self.svn_server_realm):
raise AuthenticationError(self.svn_server_host)
if username:
svn_commit_args.extend(["--username", username])
svn_commit_args.extend(["-m", message])
if changed_files:
svn_commit_args.extend(changed_files)
return self._run_svn(svn_commit_args, cwd=self.checkout_root, error_handler=commit_error_handler)
def svn_commit_log(self, svn_revision):
svn_revision = self.strip_r_from_svn_revision(svn_revision)
return self._run_svn(['log', '--non-interactive', '--revision', svn_revision])
def last_svn_commit_log(self):
# BASE is the checkout revision, HEAD is the remote repository revision
# http://svnbook.red-bean.com/en/1.0/ch03s03.html
return self.svn_commit_log('BASE')
def svn_blame(self, path):
return self._run_svn(['blame', path])
def propset(self, pname, pvalue, path):
dir, base = os.path.split(path)
return self._run_svn(['pset', pname, pvalue, base], cwd=dir)
def propget(self, pname, path):
dir, base = os.path.split(path)
return self._run_svn(['pget', pname, base], cwd=dir).encode('utf-8').rstrip("\n")
#!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.1 (specifically,
3.1.2), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
  config_list = XCConfigurationList()
  group = PBXGroup()
  project = PBXProject({'buildConfigurationList': config_list,
                        'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
  project_file = XCProjectFile({'rootObject': project})
  project_file.ComputeIDs()
  project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
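For example (illustrative; 'path' stands in for any str-typed property):

  ascii_ref = PBXFileReference({'path': 'main.m'})             # str, already UTF-8
  unicode_ref = PBXFileReference({'path': u'r\xe9sum\xe9.m'})  # encoded on output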
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  _new_sha1 = hashlib.sha1
except ImportError:
  import sha
  _new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
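# For illustration (this combination is an assumption stated here; the
# authoritative logic is in XCObject._EncodeString below): a string may be
# printed unquoted only if it matches _unquoted and does not match _quoted.
# For example, 'Source/main.m' can be printed bare, 'has space' must be quoted
# because of the space, and 'foo___bar' is quoted because of the run of three
# underscores.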
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[^ -~]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
  """Given input_path, returns a tuple with sourceTree and path values.

  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """
  source_group_match = _path_leading_variable.match(input_path)
  if source_group_match:
    source_tree = source_group_match.group(1)
    output_path = source_group_match.group(3)  # This may be None.
  else:
    source_tree = None
    output_path = input_path
  return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
  return re.sub('\$\((.*?)\)', '${\\1}', input_string)
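# For example (illustrative), ConvertVariablesToShellSyntax('$(SRCROOT)/gen')
# returns '${SRCROOT}/gen', rewriting Xcode-style $(VAR) references into
# shell-style ${VAR} references.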
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending