\w+)%s$" \
% trailing_slash(), self.wrap_view('handle_nested'),
name='api_handle_nested'),
]
def handle_nested(self, request, **kwargs):
resource_name = kwargs.pop('nest_resource')
resource = self.fields[resource_name].to_class().__class__
try:
stripped_kwargs = self.remove_api_resource_names(kwargs)
obj = self.cached_obj_get(request=request, **stripped_kwargs)
except ObjectDoesNotExist:
return HttpGone()
except MultipleObjectsReturned:
return HttpMultipleChoices('Multiple objects with this PK.')
r = resource()
if request.method.lower() == 'get':
return r.get_list(request, report=obj.pk)
elif request.method.lower() == 'post':
cont_type = request.META.get('CONTENT_TYPE', 'application/json')
deserialized = r.deserialize(request, format=cont_type)
report_uri = ReportResource().get_resource_uri(obj)
user_uri = UserResource().get_resource_uri(request.user)
parms = {'report': report_uri, 'user': user_uri}
if 'form' in cont_type:
            deserialized = dict(
                (str(k), v[0] if isinstance(v, list) and v else v)
                for k, v in deserialized.iteritems())
parms.update(deserialized)
try:
bundle = r.build_bundle(
data=dict_strip_unicode_keys(parms),
request=request
)
r.is_valid(bundle, request)
r.obj_create(bundle) # this creates the actual child
            except Exception:
                # re-raise with the offending parameters for easier debugging
                raise ValueError(parms)
            bundle_dehyd = r.full_dehydrate(bundle)
resp = r.create_response(request, bundle_dehyd)
resp['location'] = r.get_resource_uri(bundle)
resp.status_code = 201
return resp
else:
raise NotImplementedError('In POST and GET we trust.')
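# A hedged sketch of the prepend_urls routing that feeds handle_nested; the
# original pattern is cut off above, so the resource name ('reports') and the
# exact regex are assumptions inferred from the kwargs the handler pops:
#
#     def prepend_urls(self):
#         return [
#             url(r"^(?P<resource_name>reports)/(?P<pk>\w+)/"
#                 r"(?P<nest_resource>\w+)%s$" % trailing_slash(),
#                 self.wrap_view('handle_nested'),
#                 name='api_handle_nested'),
#         ]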
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
excludes = ['email', 'password', 'is_active', 'last_login',
'first_name', 'last_name',
'date_joined', 'is_staff', 'is_superuser']
allowed_methods = ['get']
#cache = SimpleCache()
filtering = { 'username': ALL, }
ordering = ['username', 'id']
class StatusResource(ModelResource):
class Meta:
resource_name = 'statuses'
queryset = api.models.Status.objects.all()
#cache = SimpleCache()
class CategoryResource(ModelResource):
class Meta:
resource_name = 'categories'
queryset = api.models.Category.objects.all()
#cache = SimpleCache()
class AddressResource(ModelResource):
class Meta:
queryset = api.models.Address.objects.all()
excludes = ['id']
#cache = SimpleCache()
class CommentResource(ModelResource):
report = fields.ToOneField('api.tasty.ReportResource', 'report')
user = fields.ToOneField(UserResource, 'user', full=True)
newStatus = fields.ToOneField(StatusResource, 'newStatus', full=True,
blank=True, null=True)
class Meta:
resource_name = 'comments'
queryset = api.models.Comment.objects.all()
authentication = Authentication()
authorization = Authorization()
#cache = SimpleCache()
filtering = {
'report': ALL,
'user': ALL_WITH_RELATIONS,
}
ordering = ['creationTime', 'newStatus', 'user', 'report', 'id']
class RatingResource(ModelResource):
report = fields.ToOneField('api.tasty.ReportResource', 'report')
user = fields.ToOneField(UserResource, 'user', full=True)
class Meta:
resource_name = 'ratings'
queryset = api.models.Rating.objects.all()
#cache = SimpleCache()
filtering = {
'report': ALL,
'user': ALL_WITH_RELATIONS,
}
class PhotoResource(ModelResource):
report = fields.ToOneField('api.tasty.ReportResource', 'report')
user = fields.ToOneField(UserResource, 'user')
class Meta:
resource_name = 'photos'
queryset = api.models.Photo.objects.all()
#cache = SimpleCache()
filtering = {
'report': ALL,
}
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.core.management.base import BaseCommand
from django.db import transaction
from apps.core.models import Group
from apps.survey.models import (Blockface, Territory,
BlockfaceReservation)
class Command(BaseCommand):
"""
Assign all expert_required blockfaces to a specified group
Usage:
./manage.py assign_expert_blocks some-group-slug
"""
@transaction.atomic
def handle(self, *args, **options):
group_slug = args[0]
group = Group.objects.get(slug=group_slug)
print("Assigning expert blocks to %s" % group.name)
already_assigned_ids = Territory.objects.filter(group=group)\
.values_list('blockface_id',
flat=True)
print("Skipping %d blocks already assigned" %
already_assigned_ids.count())
new_expert_blocks = Blockface.objects\
.filter(expert_required=True)\
.exclude(id__in=already_assigned_ids)
assigned_to_others = Territory.objects\
.filter(blockface__in=new_expert_blocks)
print("Removing %d assignments to groups other than %s" %
(assigned_to_others.count(), group.name))
assigned_to_others.delete()
old_reservations =\
BlockfaceReservation.objects\
.filter(blockface__in=new_expert_blocks)
print("Removing %d reservations on blocks that are being reassigned" %
old_reservations.count())
old_reservations.delete()
print("Assigning %d blocks to %s" %
(new_expert_blocks.count(), group.name))
for blockface in new_expert_blocks:
Territory.objects.create(group=group, blockface=blockface)
"""
tests.test_automation_state
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the state automation trigger.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.automation as automation
import homeassistant.components.automation.state as state
from homeassistant.const import CONF_PLATFORM
class TestAutomationState(unittest.TestCase):
""" Test the event automation. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
self.hass.states.set('test.entity', 'hello')
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setup_fails_if_no_entity_id(self):
self.assertFalse(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
automation.CONF_SERVICE: 'test.automation'
}
}))
def test_if_fires_on_entity_change(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.entity',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'world')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_with_from_filter(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.entity',
state.CONF_FROM: 'hello',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'world')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_with_to_filter(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.entity',
state.CONF_TO: 'world',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'world')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_fires_on_entity_change_with_both_filters(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.entity',
state.CONF_FROM: 'hello',
state.CONF_TO: 'world',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'world')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_if_not_fires_if_to_filter_not_match(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.entity',
state.CONF_FROM: 'hello',
state.CONF_TO: 'world',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'moon')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_if_from_filter_not_match(self):
self.hass.states.set('test.entity', 'bye')
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.entity',
state.CONF_FROM: 'hello',
state.CONF_TO: 'world',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'world')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_if_not_fires_if_entity_not_match(self):
self.assertTrue(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'state',
state.CONF_ENTITY_ID: 'test.another_entity',
automation.CONF_SERVICE: 'test.automation'
}
}))
self.hass.states.set('test.entity', 'world')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import partner
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# IMPORTANT: only import safe functions as this module will be included in jinja environment
import frappe
import operator
import re, urllib, datetime, math
import babel.dates
# datetime functions
def getdate(string_date):
"""
    Converts string date (yyyy-mm-dd) to datetime.date object
"""
if isinstance(string_date, datetime.date):
return string_date
elif isinstance(string_date, datetime.datetime):
return string_date.date()
if " " in string_date:
string_date = string_date.split(" ")[0]
return datetime.datetime.strptime(string_date, "%Y-%m-%d").date()
def add_to_date(date, years=0, months=0, days=0):
"""Adds `days` to the given date"""
format = isinstance(date, basestring)
if date:
date = getdate(date)
else:
raise Exception, "Start date required"
from dateutil.relativedelta import relativedelta
date += relativedelta(years=years, months=months, days=days)
if format:
return date.strftime("%Y-%m-%d")
else:
return date
def add_days(date, days):
return add_to_date(date, days=days)
def add_months(date, months):
return add_to_date(date, months=months)
def add_years(date, years):
return add_to_date(date, years=years)
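# Worked examples (string input comes back as a string, dates as dates;
# relativedelta clamps month-end overflow):
#   add_days("2013-01-30", 3)                 -> "2013-02-02"
#   add_months(datetime.date(2013, 1, 31), 1) -> datetime.date(2013, 2, 28)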
def date_diff(string_ed_date, string_st_date):
return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
return time_diff(string_ed_date, string_st_date).total_seconds()
def time_diff_in_hours(string_ed_date, string_st_date):
return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6)
def now_datetime():
return convert_utc_to_user_timezone(datetime.datetime.utcnow())
def get_user_time_zone():
if getattr(frappe.local, "user_time_zone", None) is None:
frappe.local.user_time_zone = frappe.cache().get_value("time_zone")
if not frappe.local.user_time_zone:
frappe.local.user_time_zone = frappe.db.get_default('time_zone') or 'Asia/Calcutta'
frappe.cache().set_value("time_zone", frappe.local.user_time_zone)
return frappe.local.user_time_zone
def convert_utc_to_user_timezone(utc_timestamp):
from pytz import timezone, UnknownTimeZoneError
utcnow = timezone('UTC').localize(utc_timestamp)
try:
return utcnow.astimezone(timezone(get_user_time_zone()))
except UnknownTimeZoneError:
return utcnow
def now():
"""return current datetime as yyyy-mm-dd hh:mm:ss"""
if getattr(frappe.local, "current_date", None):
return getdate(frappe.local.current_date).strftime("%Y-%m-%d") + " " + \
now_datetime().strftime('%H:%M:%S.%f')
else:
return now_datetime().strftime('%Y-%m-%d %H:%M:%S.%f')
def nowdate():
"""return current date as yyyy-mm-dd"""
return now_datetime().strftime('%Y-%m-%d')
def today():
return nowdate()
def nowtime():
"""return current time in hh:mm"""
return now_datetime().strftime('%H:%M:%S.%f')
def get_first_day(dt, d_years=0, d_months=0):
"""
Returns the first day of the month for the date specified by date object
Also adds `d_years` and `d_months` if specified
"""
dt = getdate(dt)
# d_years, d_months are "deltas" to apply to dt
overflow_years, month = divmod(dt.month + d_months - 1, 12)
year = dt.year + d_years + overflow_years
return datetime.date(year, month + 1, 1)
def get_last_day(dt):
"""
Returns last day of the month using:
`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
"""
return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
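# Worked example of the month-overflow arithmetic above:
#   get_first_day("2012-11-15", 0, 3): divmod(11 + 3 - 1, 12) = (1, 1),
#       so year 2013, month 2 -> datetime.date(2013, 2, 1)
#   get_last_day("2012-02-10"): first day of March minus one day
#       -> datetime.date(2012, 2, 29)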
def get_datetime(datetime_str):
try:
return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S.%f')
except TypeError:
if isinstance(datetime_str, datetime.datetime):
return datetime_str.replace(tzinfo=None)
else:
raise
except ValueError:
if datetime_str=='0000-00-00 00:00:00.000000':
return None
return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
def get_datetime_str(datetime_obj):
if isinstance(datetime_obj, basestring):
datetime_obj = get_datetime(datetime_obj)
return datetime_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
def formatdate(string_date=None, format_string=None):
"""
    Converts the given string date to :data:`user_format`
User format specified in defaults
Examples:
* dd-mm-yyyy
* mm-dd-yyyy
* dd/mm/yyyy
"""
date = getdate(string_date) if string_date else now_datetime().date()
if format_string:
return babel.dates.format_date(date, format_string or "medium", locale=(frappe.local.lang or "").replace("-", "_"))
else:
if getattr(frappe.local, "user_format", None) is None:
frappe.local.user_format = frappe.db.get_default("date_format")
out = frappe.local.user_format or "yyyy-mm-dd"
try:
return out.replace("dd", date.strftime("%d"))\
.replace("mm", date.strftime("%m"))\
.replace("yyyy", date.strftime("%Y"))
except ValueError, e:
raise frappe.ValidationError, str(e)
def global_date_format(date):
"""returns date as 1 January 2012"""
formatted_date = getdate(date).strftime("%d %B %Y")
return formatted_date.startswith("0") and formatted_date[1:] or formatted_date
def has_common(l1, l2):
"""Returns truthy value if there are common elements in lists l1 and l2"""
return set(l1) & set(l2)
def flt(s, precision=None):
"""Convert to float (ignore commas)"""
if isinstance(s, basestring):
s = s.replace(',','')
try:
num = float(s)
if precision is not None:
num = rounded(num, precision)
except Exception:
num = 0
return num
def cint(s):
"""Convert to integer"""
    try: num = int(float(s))
    except Exception: num = 0
return num
def cstr(s):
if isinstance(s, unicode):
return s
    elif s is None:
return ''
elif isinstance(s, basestring):
return unicode(s, 'utf-8')
else:
return unicode(s)
def rounded(num, precision=0):
"""round method for round halfs to nearest even algorithm"""
precision = cint(precision)
multiplier = 10 ** precision
# avoid rounding errors
num = round(num * multiplier if precision else num, 8)
floor = math.floor(num)
decimal_part = num - floor
if decimal_part == 0.5:
num = floor if (floor % 2 == 0) else floor + 1
else:
num = round(num)
return (num / multiplier) if precision else num
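# Examples of the round-half-to-even behaviour:
#   rounded(0.5)     -> 0.0  (floor 0 is even)
#   rounded(1.5)     -> 2.0  (floor 1 is odd, so round up)
#   rounded(0.25, 1) -> 0.2  (2.5 tenths round down to the even 2)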
def encode(obj, encoding="utf-8"):
if isinstance(obj, list):
out = []
for o in obj:
if isinstance(o, unicode):
out.append(o.encode(encoding))
else:
out.append(o)
return out
elif isinstance(obj, unicode):
return obj.encode(encoding)
else:
return obj
def parse_val(v):
"""Converts to simple datatypes from SQL query results"""
if isinstance(v, (datetime.date, datetime.datetime)):
v = unicode(v)
elif isinstance(v, datetime.timedelta):
v = ":".join(unicode(v).split(":")[:2])
elif isinstance(v, long):
v = int(v)
return v
def fmt_money(amount, precision=None, currency=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = None
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format")
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)
if precision is None:
precision = number_format_precision
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = number_format=="#,##,###.##" and 2 or 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
amount = minus + amount
if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
symbol = frappe.db.get_value("Currency", currency, "symbol") or currency
amount = symbol + " " + amount
return amount
number_format_info = {
"#,###.##": (".", ",", 2),
"#.###,##": (",", ".", 2),
"# ###.##": (".", " ", 2),
"# ###,##": (",", " ", 2),
"#'###.##": (".", "'", 2),
"#, ###.##": (".", ", ", 2),
"#,##,###.##": (".", ",", 2),
"#,###.###": (".", ",", 3),
"#.###": ("", ".", 0),
"#,###": ("", ",", 0)
}
def get_number_format_info(format):
return number_format_info.get(format) or (".", ",", 2)
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
"""
Returns string in words with currency and fraction currency.
"""
from frappe.utils import get_defaults
if not number or flt(number) < 0:
return ""
d = get_defaults()
if not main_currency:
main_currency = d.get('currency', 'INR')
if not fraction_currency:
fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction") or "Cent"
n = "%.2f" % flt(number)
main, fraction = n.split('.')
if len(fraction)==1: fraction += '0'
number_format = frappe.db.get_value("Currency", main_currency, "number_format") or \
frappe.db.get_default("number_format") or "#,###.##"
in_million = True
if number_format == "#,##,###.##": in_million = False
out = main_currency + ' ' + in_words(main, in_million).title()
if cint(fraction):
out = out + ' and ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
return out + ' only.'
#
# convert number to words
#
def in_words(integer, in_million=True):
"""
Returns string in words for the given integer.
"""
n=int(integer)
known = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',
11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen',
19: 'nineteen', 20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}
def psn(n, known, xpsn):
        import sys
if n in known: return known[n]
bestguess, remainder = str(n), 0
if n<=20:
frappe.errprint(sys.stderr)
frappe.errprint(n)
frappe.errprint("How did this happen?")
assert 0
elif n < 100:
bestguess= xpsn((n//10)*10, known, xpsn) + '-' + xpsn(n%10, known, xpsn)
return bestguess
elif n < 1000:
bestguess= xpsn(n//100, known, xpsn) + ' ' + 'hundred'
remainder = n%100
else:
if in_million:
if n < 1000000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + 'thousand'
remainder = n%1000
elif n < 1000000000:
bestguess= xpsn(n//1000000, known, xpsn) + ' ' + 'million'
remainder = n%1000000
else:
bestguess= xpsn(n//1000000000, known, xpsn) + ' ' + 'billion'
remainder = n%1000000000
else:
if n < 100000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + 'thousand'
remainder = n%1000
elif n < 10000000:
bestguess= xpsn(n//100000, known, xpsn) + ' ' + 'lakh'
remainder = n%100000
else:
bestguess= xpsn(n//10000000, known, xpsn) + ' ' + 'crore'
remainder = n%10000000
if remainder:
if remainder >= 100:
comma = ','
else:
comma = ''
return bestguess + comma + ' ' + xpsn(remainder, known, xpsn)
else:
return bestguess
return psn(n, known, psn)
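# Examples:
#   in_words(123456)        -> 'one hundred twenty-three thousand, four hundred fifty-six'
#   in_words(123456, False) -> 'one lakh, twenty-three thousand, four hundred fifty-six'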
def is_html(text):
    out = False
    for key in ["<br>", "<p", "<img", "<div"]:
        if key in text:
            out = True
            break
    return out
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
def strip_html(text):
"""removes anything enclosed in and including <>"""
return _striptags_re.sub("", text)
def escape_html(text):
    html_escape_table = {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
        ">": "&gt;",
        "<": "&lt;",
    }
    return "".join(html_escape_table.get(c, c) for c in text)
def pretty_date(iso_datetime):
"""
Takes an ISO time and returns a string representing how
long ago the date represents.
Ported from PrettyDate by John Resig
"""
if not iso_datetime: return ''
import math
if isinstance(iso_datetime, basestring):
iso_datetime = datetime.datetime.strptime(iso_datetime, '%Y-%m-%d %H:%M:%S.%f')
now_dt = datetime.datetime.strptime(now(), '%Y-%m-%d %H:%M:%S.%f')
dt_diff = now_dt - iso_datetime
# available only in python 2.7+
# dt_diff_seconds = dt_diff.total_seconds()
dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
    # different cases
if dt_diff_seconds < 60.0:
return 'just now'
elif dt_diff_seconds < 120.0:
return '1 minute ago'
elif dt_diff_seconds < 3600.0:
return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
elif dt_diff_seconds < 7200.0:
return '1 hour ago'
elif dt_diff_seconds < 86400.0:
return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
elif dt_diff_days == 1.0:
return 'Yesterday'
elif dt_diff_days < 7.0:
return '%s days ago' % cint(dt_diff_days)
elif dt_diff_days < 31.0:
return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
elif dt_diff_days < 365.0:
return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
else:
return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
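# Examples (relative to now()):
#   a timestamp a few seconds old -> 'just now'
#   a timestamp 90 seconds old    -> '1 minute ago'
#   a timestamp 10 days old       -> '2 week(s) ago'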
def comma_or(some_list):
return comma_sep(some_list, " or ")
def comma_and(some_list):
return comma_sep(some_list, " and ")
def comma_sep(some_list, sep):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["'%s'" % s for s in some_list]
return ", ".join(some_list[:-1]) + sep + some_list[-1]
else:
return some_list
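# Examples:
#   comma_and(["a", "b", "c"]) -> "'a', 'b' and 'c'"
#   comma_or(["x", "y"])       -> "'x' or 'y'"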
def filter_strip_join(some_list, sep):
"""given a list, filter None values, strip spaces and join"""
return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_url(uri=None, full_address=False):
"""get app url from request"""
host_name = frappe.local.conf.host_name
if not host_name:
if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
protocol = 'https' == frappe.get_request_header('X-Forwarded-Proto', "") and 'https://' or 'http://'
host_name = protocol + frappe.local.request.host
elif frappe.local.site:
host_name = "http://{}".format(frappe.local.site)
else:
host_name = frappe.db.get_value("Website Settings", "Website Settings",
"subdomain")
if host_name and "http" not in host_name:
host_name = "http://" + host_name
if not host_name:
host_name = "http://localhost"
if not uri and full_address:
uri = frappe.get_request_header("REQUEST_URI", "")
url = urllib.basejoin(host_name, uri) if uri else host_name
return url
def get_url_to_form(doctype, name, label=None):
if not label: label = name
return """%(label)s""" % locals()
operator_map = {
# startswith
"^": lambda (a, b): (a or "").startswith(b),
# in or not in a list
"in": lambda (a, b): operator.contains(b, a),
"not in": lambda (a, b): not operator.contains(b, a),
# comparison operators
"=": lambda (a, b): operator.eq(a, b),
"!=": lambda (a, b): operator.ne(a, b),
">": lambda (a, b): operator.gt(a, b),
"<": lambda (a, b): operator.lt(a, b),
">=": lambda (a, b): operator.ge(a, b),
"<=": lambda (a, b): operator.le(a, b),
"not None": lambda (a, b): a and True or False,
"None": lambda (a, b): (not a) and True or False
}
def compare(val1, condition, val2):
ret = False
if condition in operator_map:
ret = operator_map[condition]((val1, val2))
return ret
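# Examples:
#   compare("hello", "^", "he")  -> True  (startswith)
#   compare(3, ">", 2)           -> True
#   compare(None, "None", None)  -> True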
def scrub_urls(html):
html = expand_relative_urls(html)
html = quote_urls(html)
return html
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
return "".join(to_expand)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
def quote_urls(html):
def _quote_url(match):
groups = list(match.groups())
groups[2] = urllib.quote(groups[2].encode("utf-8"), safe=b"~@#$&()*!+=:;,.?/'").decode("utf-8")
return "".join(groups)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)',
_quote_url, html)
def unique(seq):
"""use this instead of list(set()) to preserve order of the original list.
Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x)) ]
"""
Base classes and utility functions for Data API service classes.
"""
__author__ = 'Dan Gunter '
__date__ = '12/24/15'
# Imports
# -------
# Stdlib
import functools
import logging
import os
import signal
import time
import traceback
# Third party
import twisted.internet
import twisted.web
from thrift.transport import THttpClient
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTwisted
# Local
from doekbase.data_api import exceptions, util
# Global constants and variables
# ------------------------------
DEFAULT_WS_URL = 'https://ci.kbase.us/services/ws/'
DEFAULT_SHOCK_URL = 'https://ci.kbase.us/services/shock-api/'
SERVICES_DICT = {'workspace_service_url': DEFAULT_WS_URL,
'shock_service_url' : DEFAULT_SHOCK_URL}
# Functions and classes
# ---------------------
def server_method(func):
"""Decorator for service methods.
The wrapper depends on the existence of two attributes in the
class being wrapped:
1. ttypes (module): Thrift type module, containing exception classes
2. log (logging.Logger): Logger instance
Args:
func (function): Function being wrapped
"""
def wrapper(self, token, ref, *args, **kwargs):
assert hasattr(self, 'log'), 'Method in wrapped class must have "log" ' \
'attribute'
assert hasattr(self, 'ttypes'), 'Method in wrapped class must have ' \
'"ttypes" attribute'
error, result = None, None
#self.log.debug('method={meth} state=begin token={tok} ref={ref} args={'
self.log.debug('method={meth} state=begin ref={ref} args={'
'args} kwargs={kw}'
.format(meth=func.__name__, tok=token, ref=ref,
args=args, kw=kwargs))
t0 = time.time()
try:
result = func(self, token, ref, *args, **kwargs)
except AttributeError, e:
error = e
raise self.ttypes.AttributeException(str(e.message),
traceback.format_exc())
except exceptions.AuthenticationError, e:
error = e
raise self.ttypes.AuthenticationException(str(e.message),
traceback.format_exc())
except exceptions.AuthorizationError, e:
error = e
raise self.ttypes.AuthorizationException(str(e.message),
traceback.format_exc())
except TypeError, e:
error = e
raise self.ttypes.TypeException(str(e.message), traceback.format_exc())
except Exception, e:
error = e
raise self.ttypes.ServiceException(str(e.message),
traceback.format_exc(),
{"ref": str(ref)})
finally:
if error is None:
#self.log.debug('method={meth} state=end token={tok} ref={ref} '
self.log.debug('method={meth} state=end ref={ref} '
'args={args} kwargs={kw} dur={t:.3f}'
.format(meth=func.__name__, tok=token, ref=ref,
args=args, kw=kwargs,
t=time.time() - t0))
else:
#self.log.error('method={meth} state=error token={tok} '
self.log.error('method={meth} state=error '
                               'ref={ref} args={args} kwargs={kw} '
'error_message="{m}" dur={t:.3f}'
.format(meth=func.__name__, tok=token, ref=ref,
args=args, kw=kwargs, m=str(error),
t=time.time() - t0))
return result
return wrapper
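# A hedged usage sketch for @server_method (TaxonService and get_info are
# illustrative names, not part of this module): the wrapped method must take
# (self, token, ref, ...) and the class must define `log` and `ttypes`,
# which BaseService.__init__ sets up.
#
#     class TaxonService(BaseService):
#         @server_method
#         def get_info(self, token, ref):
#             return self._get_instance(token, ref).get_info()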
class BaseService(object):
"""Base class for Data API service classes, which will be defined
in the 'interface' module of the appropriate API subdirectory.
Takes care of some boilerplate logging and error-checking, as well
as setting up instance variables for the @server_method decorator.
"""
def __init__(self, log, ttypes_module, api_class, services=None):
"""Constructor.
Args:
log (logging.Logger): For logging service activity
ttypes_module: Thrift ttypes module for the API
api_class: the API library class, e.g.,
`doekbase.data_api.taxonomy.taxon.api.TaxonAPI`
services (dict): Service configuration dictionary, passed to
constructor of the `api_class`.
"""
self.log = log
self.ttypes = ttypes_module
self._api_class = api_class
self.log.debug('method=__init__ state=begin services={s}'
.format(s=services))
try:
if services is None or not isinstance(services, dict):
raise TypeError("You must provide a service configuration " +
"dictionary! Found {0}".format(type(services)))
elif not services.has_key("workspace_service_url"):
raise KeyError("Expecting workspace_service_url key!")
except Exception as e:
            self.log.error('method=__init__ state=error services={s} '
                           'error_message="{m}"'
.format(s=services, m=e.message))
raise
self.services = services
self.log.debug('method=__init__ state=end services={s} '
.format(s=services))
def _get_instance(self, *args):
"""Return an instance of the API. Use this level of indirection to
allow future optimizations over creating it each time.
"""
return self._api_class(self.services, *args)
class BaseClientConnection(object):
"""Base class for ClientConnection objects defined
    in the data_api.<api>.service.interface module.
"""
def __init__(self, thrift_client, url):
if not hasattr(thrift_client, 'Client') or not callable(
thrift_client.Client):
raise AttributeError('Invalid "thrift_client" argument')
self.client = None
self.transport = None
self.protocol = None
try:
self.transport = THttpClient.THttpClient(url)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.client = thrift_client.Client(self.protocol)
except AssertionError:
raise ValueError('Invalid Thrift client URL: "{}"'.format(url))
except TTransport.TTransportException as err:
raise RuntimeError(
'Cannot connect to remote Thrift service at {}: {}'
.format(url, err.message))
def get_client(self):
return self.transport, self.client
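# A hedged client-side sketch (`thrift_service` stands in for a generated
# Thrift module exposing a Client class; `get_info` is a hypothetical RPC).
# Thrift transports must be opened before use and closed afterwards:
#
#     conn = BaseClientConnection(thrift_service, 'http://localhost:9100')
#     transport, client = conn.get_client()
#     transport.open()
#     try:
#         result = client.get_info(token, ref)
#     finally:
#         transport.close()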
# For service drivers
def start_service(api_class, service_class, log,
services=None, host='localhost', port=9100, killprocgrp=False):
"""Start a Data API service.
Args:
api_class (BaseService): The custom API service class, e.g.,
`doekbase.data_api.taxonomy.taxon.api.TaxonService`
service_class (type): The Thrift auto-generated service class
log (logging.Logger): Logging object
services (dict): Service configuration dictionary, passed to
constructor of the `api_class`.
host (str): Service host (will default to 'localhost')
port (int): Service port, e.g. 9101
killprocgrp (bool): if True, kill process group on exit
"""
assert issubclass(api_class, BaseService), \
'Invalid "api_class": must be a subclass of ' \
'doekbase.data_api.service_core.BaseService'
assert hasattr(service_class, 'Processor'), 'Invalid "service_class": ' \
'missing "Processor" attribute'
assert isinstance(port, int), 'The "port" must be an integer'
svc_t0 = util.log_start(log, 'start_service',
kvp=dict(host=host, port=port))
# Create server
services = services or SERVICES_DICT
handler = api_class(services)
processor = service_class.Processor(handler)
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
resource = TTwisted.ThriftResource(processor, pfactory, pfactory)
site = twisted.web.server.Site(resource=resource)
twisted.internet.reactor.listenTCP(port, site, interface=host)
# Kill entire process group on shutdown
if killprocgrp:
twisted.internet.reactor.addSystemEventTrigger('before', 'shutdown',
functools.partial(
kill_process_group,
log=log))
# Run server
sname = api_class.__name__
shost = host or 'localhost'
util.log_start(log, 'server', kvp=dict(name=sname, host=shost, port=port))
t0 = util.log_start(log, 'twisted.internet.reactor.run',
level=logging.DEBUG)
try:
twisted.internet.reactor.run()
except Exception as err:
log.error('msg="Abort {} server on error"'.format(sname))
util.log_end(log, t0, 'twisted.internet.reactor.run',
status_code=1, level=logging.ERROR, kvp=dict(msg=err))
raise
finally:
util.log_end(log, t0, 'twisted.internet.reactor.run')
util.log_end(log, svc_t0, 'start_service',
kvp=dict(host=host, port=port))
return 0
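# A hedged wiring example (TaxonService and thrift_service are illustrative
# stand-ins for a concrete API service class and its generated Thrift module):
#
#     log = logging.getLogger('data_api.service')
#     start_service(TaxonService, thrift_service, log,
#                   services=SERVICES_DICT, host='localhost', port=9101)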
def stop_service():
twisted.internet.reactor.stop()
def kill_process_group(log):
"""Kill entire process group on Twisted shutdown.
Args:
log (logging.Logger): Logger
"""
pid = os.getpid() # my pid
grpid = -os.getpgid(pid) # my process group
signo = signal.SIGINT # the signal to send
t = util.log_start(log, 'kill_process_group', level=logging.WARN,
kvp=dict(pid=pid, group_pid=grpid, signal=signo))
# ignore signal in this process (Twisted is already shutting down)
signal.signal(signo, signal.SIG_IGN)
# send the signal to my process group
os.kill(grpid, signo)
util.log_end(log, t, 'kill_process_group', level=logging.WARN,
kvp=dict(pid=pid, group_pid=grpid, signal=signo))
CHAR_MAX = 127
CHAR_MIN = -128
DBL_MAX = 1.7976931348623157e+308
DBL_MIN = 2.2250738585072014e-308
FLT_MAX = 3.4028234663852886e+38
FLT_MIN = 1.1754943508222875e-38
INT_MAX = 2147483647
INT_MIN = -2147483648
LLONG_MAX = 9223372036854775807
LLONG_MIN = -9223372036854775808
LONG_MAX = 2147483647
LONG_MIN = -2147483648
PY_SSIZE_T_MAX = 2147483647
PY_SSIZE_T_MIN = -2147483648
SHRT_MAX = 32767
SHRT_MIN = -32768
SIZEOF_PYGC_HEAD = 16
UCHAR_MAX = 255
UINT_MAX = 4294967295
ULLONG_MAX = 18446744073709551615
ULONG_MAX = 4294967295
USHRT_MAX = 65535
__loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>"
def _pending_threadfunc(*args,**kw):
pass
class _test_structmembersType(object):
pass
def _test_thread_state(*args,**kw):
pass
def argparsing(*args,**kw):
pass
def code_newempty(*args,**kw):
pass
def codec_incrementaldecoder(*args,**kw):
pass
def codec_incrementalencoder(*args,**kw):
pass
def crash_no_current_thread(*args,**kw):
pass
class error(Exception):
pass
def exception_print(*args,**kw):
pass
def getargs_B(*args,**kw):
pass
def getargs_H(*args,**kw):
pass
def getargs_I(*args,**kw):
pass
def getargs_K(*args,**kw):
pass
def getargs_L(*args,**kw):
pass
def getargs_Z(*args,**kw):
pass
def getargs_Z_hash(*args,**kw):
pass
def getargs_b(*args,**kw):
pass
def getargs_c(*args,**kw):
pass
def getargs_h(*args,**kw):
pass
def getargs_i(*args,**kw):
pass
def getargs_k(*args,**kw):
pass
def getargs_keyword_only(*args,**kw):
pass
def getargs_keywords(*args,**kw):
pass
def getargs_l(*args,**kw):
pass
def getargs_n(*args,**kw):
pass
def getargs_p(*args,**kw):
pass
def getargs_s(*args,**kw):
pass
def getargs_s_hash(*args,**kw):
pass
def getargs_s_star(*args,**kw):
pass
def getargs_tuple(*args,**kw):
pass
def getargs_u(*args,**kw):
pass
def getargs_u_hash(*args,**kw):
pass
def getargs_w_star(*args,**kw):
pass
def getargs_y(*args,**kw):
pass
def getargs_y_hash(*args,**kw):
pass
def getargs_y_star(*args,**kw):
pass
def getargs_z(*args,**kw):
pass
def getargs_z_hash(*args,**kw):
pass
def getargs_z_star(*args,**kw):
pass
class instancemethod(object):
pass
def make_exception_with_doc(*args,**kw):
pass
def make_memoryview_from_NULL_pointer(*args,**kw):
pass
def parse_tuple_and_keywords(*args,**kw):
pass
def pytime_object_to_time_t(*args,**kw):
pass
def pytime_object_to_timespec(*args,**kw):
pass
def pytime_object_to_timeval(*args,**kw):
pass
def raise_exception(*args,**kw):
pass
def raise_memoryerror(*args,**kw):
pass
def run_in_subinterp(*args,**kw):
pass
def set_exc_info(*args,**kw):
pass
def test_L_code(*args,**kw):
pass
def test_Z_code(*args,**kw):
pass
def test_capsule(*args,**kw):
pass
def test_config(*args,**kw):
pass
def test_datetime_capi(*args,**kw):
pass
def test_dict_iteration(*args,**kw):
pass
def test_empty_argparse(*args,**kw):
pass
def test_k_code(*args,**kw):
pass
def test_lazy_hash_inheritance(*args,**kw):
pass
def test_list_api(*args,**kw):
pass
def test_long_and_overflow(*args,**kw):
pass
def test_long_api(*args,**kw):
pass
def test_long_as_double(*args,**kw):
pass
def test_long_as_size_t(*args,**kw):
pass
def test_long_long_and_overflow(*args,**kw):
pass
def test_long_numbits(*args,**kw):
pass
def test_longlong_api(*args,**kw):
pass
def test_null_strings(*args,**kw):
pass
def test_s_code(*args,**kw):
pass
def test_string_from_format(*args,**kw):
pass
def test_string_to_double(*args,**kw):
pass
def test_u_code(*args,**kw):
pass
def test_unicode_compare_with_ascii(*args,**kw):
pass
def test_widechar(*args,**kw):
pass
def test_with_docstring(*args,**kw):
"""This is a pretty normal docstring."""
pass
def traceback_print(*args,**kw):
pass
def unicode_aswidechar(*args,**kw):
pass
def unicode_aswidecharstring(*args,**kw):
pass
def unicode_encodedecimal(*args,**kw):
pass
def unicode_transformdecimaltoascii(*args,**kw):
pass
import wx
from Tree import Tree
from wx.lib.customtreectrl import wxEVT_TREE_ITEM_ACTIVATED
class MNotExpandOnDClick(object):
def __init__(self):
self.Bind(wx.EVT_LEFT_DCLICK, self.__on_left_dclick)
def __on_left_dclick(self, event):
itemId, flags = self.HitTest(event.GetPosition())
if flags & (wx.TREE_HITTEST_ONITEMLABEL | wx.TREE_HITTEST_ONITEMICON):
self.AddPendingEvent(wx.TreeEvent(wxEVT_TREE_ITEM_ACTIVATED, self, self.GetSelection()))
else:
event.Skip()
"""
def test():
from TreeNode import TreeNode
class MyTreeNode(TreeNode):
def __init__(self, id):
TreeNode.__init__(self)
self.id = id
def getId(self):
return self.id
def children(self):
return (MyTreeNode(self.id*10+1), MyTreeNode(self.id*10+2), MyTreeNode(self.id*10+3))
def getText(self):
return str(self.id)
class MyTree(Tree, MNotExpandOnDClick):
def __init__(self, *p, **pp):
Tree.__init__(self, *p, **pp)
MNotExpandOnDClick.__init__(self)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.__onSelectionChanged)
def __onSelectionChanged(self, event):
print "SEL CHANGED:", self.getNodeForEvent(event).getId()
def roots(self):
return (MyTreeNode(1), MyTreeNode(2), MyTreeNode(3))
def oninit(self):
self.t = MyTree(self)
self.t.refresh()
def ondestroy(self):
pass
def ontimer(self):
#self.t.selectIdPath([1, 12])
pass
from toolib.wx.TestApp import TestApp
TestApp(oninit, ondestroy, ontimer=ontimer).MainLoop()
if __name__ == '__main__':
test()
"""
# $Id: pjsua_app.py 4724 2014-01-31 08:52:09Z nanang $
#
# Sample and simple Python script to make and receive calls, and do
# presence and instant messaging/IM using PJSUA-API binding for Python.
#
# Copyright (C) 2003-2007 Benny Prijono
#
import py_pjsua
import sys
import thread
#
# Configurations
#
THIS_FILE = "pjsua_app.py"
C_QUIT = 0
C_LOG_LEVEL = 4
# STUN config.
# Set C_STUN_HOST to the address:port of the STUN server to enable STUN
#
C_STUN_HOST = ""
#C_STUN_HOST = "192.168.0.2"
#C_STUN_HOST = "stun.iptel.org:3478"
# SIP port
C_SIP_PORT = 5060
# Globals
#
g_ua_cfg = None
g_acc_id = py_pjsua.PJSUA_INVALID_ID
g_current_call = py_pjsua.PJSUA_INVALID_ID
g_wav_files = []
g_wav_id = 0
g_wav_port = 0
g_rec_file = ""
g_rec_id = 0
g_rec_port = 0
# Utility: display PJ error and exit
#
def err_exit(title, rc):
py_pjsua.perror(THIS_FILE, title, rc)
py_pjsua.destroy()
    sys.exit(1)
# Logging function (also callback, called by pjsua-lib)
#
def log_cb(level, str, len):
if level <= C_LOG_LEVEL:
print str,
def write_log(level, str):
log_cb(level, str + "\n", 0)
# Utility to get call info
#
def call_name(call_id):
ci = py_pjsua.call_get_info(call_id)
return "[Call " + `call_id` + " " + ci.remote_info + "]"
# Callback when call state has changed.
#
def on_call_state(call_id, e):
global g_current_call
ci = py_pjsua.call_get_info(call_id)
write_log(3, call_name(call_id) + " state = " + `ci.state_text`)
if ci.state == py_pjsua.PJSIP_INV_STATE_DISCONNECTED:
g_current_call = py_pjsua.PJSUA_INVALID_ID
# Callback for incoming call
#
def on_incoming_call(acc_id, call_id, rdata):
global g_current_call
if g_current_call != py_pjsua.PJSUA_INVALID_ID:
        # There's a call in progress - answer Busy
py_pjsua.call_answer(call_id, 486, None, None)
return
g_current_call = call_id
ci = py_pjsua.call_get_info(call_id)
write_log(3, "*** Incoming call: " + call_name(call_id) + "***")
write_log(3, "*** Press a to answer or h to hangup ***")
# Callback when media state has changed (e.g. established or terminated)
#
def on_call_media_state(call_id):
ci = py_pjsua.call_get_info(call_id)
if ci.media_status == py_pjsua.PJSUA_CALL_MEDIA_ACTIVE:
py_pjsua.conf_connect(ci.conf_slot, 0)
py_pjsua.conf_connect(0, ci.conf_slot)
write_log(3, call_name(call_id) + ": media is active")
else:
write_log(3, call_name(call_id) + ": media is inactive")
# Callback when account registration state has changed
#
def on_reg_state(acc_id):
acc_info = py_pjsua.acc_get_info(acc_id)
if acc_info.has_registration != 0:
cmd = "registration"
else:
cmd = "unregistration"
if acc_info.status != 0 and acc_info.status != 200:
write_log(3, "Account " + cmd + " failed: rc=" + `acc_info.status` + " " + acc_info.status_text)
else:
write_log(3, "Account " + cmd + " success")
# Callback when buddy's presence state has changed
#
def on_buddy_state(buddy_id):
write_log(3, "On Buddy state called")
buddy_info = py_pjsua.buddy_get_info(buddy_id)
if buddy_info.status != 0 and buddy_info.status != 200:
write_log(3, "Status of " + `buddy_info.uri` + " is " + `buddy_info.status_text`)
else:
write_log(3, "Status : " + `buddy_info.status`)
# Callback on incoming pager (MESSAGE)
#
def on_pager(call_id, strfrom, strto, contact, mime_type, text):
write_log(3, "MESSAGE from " + `strfrom` + " : " + `text`)
# Callback on the delivery status of outgoing pager (MESSAGE)
#
def on_pager_status(call_id, strto, body, user_data, status, reason):
write_log(3, "MESSAGE to " + `strto` + " status " + `status` + " reason " + `reason`)
# Received typing indication
#
def on_typing(call_id, strfrom, to, contact, is_typing):
str_t = ""
if is_typing:
str_t = "is typing.."
else:
str_t = "has stopped typing"
write_log(3, "IM indication: " + strfrom + " " + str_t)
# Received the status of previous call transfer request
#
def on_call_transfer_status(call_id,status_code,status_text,final,p_cont):
strfinal = ""
if final == 1:
strfinal = "[final]"
write_log(3, "Call " + `call_id` + ": transfer status= " + `status_code` + " " + status_text+ " " + strfinal)
if status_code/100 == 2:
write_log(3, "Call " + `call_id` + " : call transferred successfully, disconnecting call")
status = py_pjsua.call_hangup(call_id, 410, None, None)
p_cont = 0
# Callback on incoming call transfer request
#
def on_call_transfer_request(call_id, dst, code):
write_log(3, "Call transfer request from " + `call_id` + " to " + dst + " with code " + `code`)
#
# Initialize pjsua.
#
def app_init():
global g_acc_id, g_ua_cfg
# Create pjsua before anything else
status = py_pjsua.create()
if status != 0:
err_exit("pjsua create() error", status)
# Create and initialize logging config
log_cfg = py_pjsua.logging_config_default()
log_cfg.level = C_LOG_LEVEL
log_cfg.cb = log_cb
# Create and initialize pjsua config
# Note: for this Python module, thread_cnt must be 0 since Python
# doesn't like to be called from alien thread (pjsua's thread
# in this case)
ua_cfg = py_pjsua.config_default()
ua_cfg.thread_cnt = 0
ua_cfg.user_agent = "PJSUA/Python 0.1"
ua_cfg.cb.on_incoming_call = on_incoming_call
ua_cfg.cb.on_call_media_state = on_call_media_state
ua_cfg.cb.on_reg_state = on_reg_state
ua_cfg.cb.on_call_state = on_call_state
ua_cfg.cb.on_buddy_state = on_buddy_state
ua_cfg.cb.on_pager = on_pager
ua_cfg.cb.on_pager_status = on_pager_status
ua_cfg.cb.on_typing = on_typing
ua_cfg.cb.on_call_transfer_status = on_call_transfer_status
ua_cfg.cb.on_call_transfer_request = on_call_transfer_request
# Configure STUN setting
if C_STUN_HOST != "":
        ua_cfg.stun_host = C_STUN_HOST
# Create and initialize media config
med_cfg = py_pjsua.media_config_default()
med_cfg.ec_tail_len = 0
#
# Initialize pjsua!!
#
status = py_pjsua.init(ua_cfg, log_cfg, med_cfg)
if status != 0:
err_exit("pjsua init() error", status)
# Configure UDP transport config
transport_cfg = py_pjsua.transport_config_default()
transport_cfg.port = C_SIP_PORT
# Create UDP transport
status, transport_id = \
py_pjsua.transport_create(py_pjsua.PJSIP_TRANSPORT_UDP, transport_cfg)
if status != 0:
err_exit("Error creating UDP transport", status)
# Create initial default account
status, acc_id = py_pjsua.acc_add_local(transport_id, 1)
if status != 0:
err_exit("Error creating account", status)
g_acc_id = acc_id
g_ua_cfg = ua_cfg
# Add SIP account interactively
#
def add_account():
global g_acc_id
acc_domain = ""
acc_username = ""
acc_passwd =""
confirm = ""
# Input account configs
print "Your SIP domain (e.g. myprovider.com): ",
acc_domain = sys.stdin.readline()
if acc_domain == "\n":
return
acc_domain = acc_domain.replace("\n", "")
print "Your username (e.g. alice): ",
acc_username = sys.stdin.readline()
if acc_username == "\n":
return
acc_username = acc_username.replace("\n", "")
print "Your password (e.g. secret): ",
acc_passwd = sys.stdin.readline()
if acc_passwd == "\n":
return
acc_passwd = acc_passwd.replace("\n", "")
# Configure account configuration
acc_cfg = py_pjsua.acc_config_default()
acc_cfg.id = "sip:" + acc_username + "@" + acc_domain
acc_cfg.reg_uri = "sip:" + acc_domain
cred_info = py_pjsua.Pjsip_Cred_Info()
cred_info.realm = "*"
cred_info.scheme = "digest"
cred_info.username = acc_username
cred_info.data_type = 0
cred_info.data = acc_passwd
acc_cfg.cred_info.append(1)
acc_cfg.cred_info[0] = cred_info
# Add new SIP account
status, acc_id = py_pjsua.acc_add(acc_cfg, 1)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error adding SIP account", status)
else:
g_acc_id = acc_id
write_log(3, "Account " + acc_cfg.id + " added")
def add_player():
global g_wav_files
global g_wav_id
global g_wav_port
file_name = ""
status = -1
wav_id = 0
print "Enter the path of the file player(e.g. /tmp/audio.wav): ",
file_name = sys.stdin.readline()
if file_name == "\n":
return
file_name = file_name.replace("\n", "")
status, wav_id = py_pjsua.player_create(file_name, 0)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error adding file player ", status)
else:
g_wav_files.append(file_name)
if g_wav_id == 0:
g_wav_id = wav_id
g_wav_port = py_pjsua.player_get_conf_port(wav_id)
write_log(3, "File player " + file_name + " added")
def add_recorder():
global g_rec_file
global g_rec_id
global g_rec_port
file_name = ""
status = -1
rec_id = 0
print "Enter the path of the file recorder(e.g. /tmp/audio.wav): ",
file_name = sys.stdin.readline()
if file_name == "\n":
return
file_name = file_name.replace("\n", "")
status, rec_id = py_pjsua.recorder_create(file_name, 0, None, 0, 0)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error adding file recorder ", status)
else:
g_rec_file = file_name
g_rec_id = rec_id
g_rec_port = py_pjsua.recorder_get_conf_port(rec_id)
write_log(3, "File recorder " + file_name + " added")
def conf_list():
ports = None
print "Conference ports : "
ports = py_pjsua.enum_conf_ports()
for port in ports:
info = None
info = py_pjsua.conf_get_port_info(port)
txlist = ""
for listener in info.listeners:
txlist = txlist + "#" + `listener` + " "
print "Port #" + `info.slot_id` + "[" + `(info.clock_rate/1000)` + "KHz/" + `(info.samples_per_frame * 1000 / info.clock_rate)` + "ms] " + info.name + " transmitting to: " + txlist
def connect_port():
src_port = 0
dst_port = 0
print "Connect src port # (empty to cancel): "
src_port = sys.stdin.readline()
if src_port == "\n":
return
src_port = src_port.replace("\n", "")
src_port = int(src_port)
print "To dst port # (empty to cancel): "
dst_port = sys.stdin.readline()
if dst_port == "\n":
return
dst_port = dst_port.replace("\n", "")
dst_port = int(dst_port)
status = py_pjsua.conf_connect(src_port, dst_port)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error connecting port ", status)
else:
write_log(3, "Port connected from " + `src_port` + " to " + `dst_port`)
def disconnect_port():
src_port = 0
dst_port = 0
print "Disconnect src port # (empty to cancel): "
src_port = sys.stdin.readline()
if src_port == "\n":
return
src_port = src_port.replace("\n", "")
src_port = int(src_port)
print "From dst port # (empty to cancel): "
dst_port = sys.stdin.readline()
if dst_port == "\n":
return
dst_port = dst_port.replace("\n", "")
dst_port = int(dst_port)
status = py_pjsua.conf_disconnect(src_port, dst_port)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error disconnecting port ", status)
else:
write_log(3, "Port disconnected " + `src_port` + " from " + `dst_port`)
def dump_call_quality():
global g_current_call
buf = ""
    if g_current_call != py_pjsua.PJSUA_INVALID_ID:
buf = py_pjsua.call_dump(g_current_call, 1, 1024, " ")
write_log(3, "\n" + buf)
else:
write_log(3, "No current call")
def xfer_call():
global g_current_call
    if g_current_call == py_pjsua.PJSUA_INVALID_ID:
write_log(3, "No current call")
else:
call = g_current_call
ci = py_pjsua.call_get_info(g_current_call)
print "Transferring current call ["+ `g_current_call` + "] " + ci.remote_info
print "Enter sip url : "
url = sys.stdin.readline()
if url == "\n":
return
url = url.replace("\n", "")
if call != g_current_call:
print "Call has been disconnected"
return
msg_data = py_pjsua.msg_data_init()
        status = py_pjsua.call_xfer(g_current_call, url, msg_data)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error transferring call ", status)
else:
write_log(3, "Call transferred to " + url)
def xfer_call_replaces():
    if g_current_call == py_pjsua.PJSUA_INVALID_ID:
write_log(3, "No current call")
else:
call = g_current_call
ids = py_pjsua.enum_calls()
if len(ids) <= 1:
print "There are no other calls"
return
ci = py_pjsua.call_get_info(g_current_call)
print "Transfer call [" + `g_current_call` + "] " + ci.remote_info + " to one of the following:"
for i in range(0, len(ids)):
if ids[i] == call:
continue
call_info = py_pjsua.call_get_info(ids[i])
print `ids[i]` + " " + call_info.remote_info + " [" + call_info.state_text + "]"
print "Enter call number to be replaced : "
buf = sys.stdin.readline()
buf = buf.replace("\n","")
if buf == "":
return
dst_call = int(buf)
if call != g_current_call:
print "Call has been disconnected"
return
if dst_call == call:
print "Destination call number must not be the same as the call being transferred"
return
if dst_call >= py_pjsua.PJSUA_MAX_CALLS:
print "Invalid destination call number"
return
if py_pjsua.call_is_active(dst_call) == 0:
print "Invalid destination call number"
return
py_pjsua.call_xfer_replaces(call, dst_call, 0, None)
#
# Worker thread function.
# Python doesn't like it when it's called from an alien thread
# (pjsua's worker thread, in this case), so for Python we must
# disable worker thread in pjsua and poll pjsua from Python instead.
#
def worker_thread_main(arg):
global C_QUIT
    thread_desc = 0
status = py_pjsua.thread_register("python worker", thread_desc)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error registering thread", status)
else:
while C_QUIT == 0:
py_pjsua.handle_events(50)
print "Worker thread quitting.."
C_QUIT = 2
# Start pjsua
#
def app_start():
# Done with initialization, start pjsua!!
#
status = py_pjsua.start()
if status != 0:
err_exit("Error starting pjsua!", status)
# Start worker thread
thr = thread.start_new(worker_thread_main, (0,))
print "PJSUA Started!!"
# Print account and buddy list
def print_acc_buddy_list():
global g_acc_id
acc_ids = py_pjsua.enum_accs()
print "Account list:"
for acc_id in acc_ids:
acc_info = py_pjsua.acc_get_info(acc_id)
if acc_info.has_registration == 0:
acc_status = acc_info.status_text
else:
acc_status = `acc_info.status` + "/" + acc_info.status_text + " (expires=" + `acc_info.expires` + ")"
if acc_id == g_acc_id:
print " *",
else:
print " ",
print "[" + `acc_id` + "] " + acc_info.acc_uri + ": " + acc_status
print " Presence status: ",
if acc_info.online_status != 0:
print "Online"
else:
print "Invisible"
if py_pjsua.get_buddy_count() > 0:
print ""
print "Buddy list:"
buddy_ids = py_pjsua.enum_buddies()
for buddy_id in buddy_ids:
bi = py_pjsua.buddy_get_info(buddy_id)
print " [" + `buddy_id` + "] " + bi.status_text + " " + bi.uri
# Print application menu
#
def print_menu():
print ""
print ">>>"
print_acc_buddy_list()
print """
+============================================================================+
| Call Commands : | Buddy, IM & Presence: | Account: |
| | | |
| m Make call | +b Add buddy | +a Add account |
| a Answer current call | -b Delete buddy | -a Delete accnt |
| h Hangup current call | | |
| H Hold call | i Send instant message | rr register |
| v re-inVite (release Hold) | s Subscribe presence | ru Unregister |
| # Send DTMF string | u Unsubscribe presence | |
| dq Dump curr. call quality | t ToGgle Online status | |
| +--------------------------+------------------+
| x Xfer call | Media Commands: | Status: |
| X Xfer with Replaces | | |
| | cl List ports | d Dump status |
| | cc Connect port | dd Dump detail |
| | cd Disconnect port | |
| | +p Add file player | |
|------------------------------+ +r Add file recorder | |
| q Quit application | | |
+============================================================================+"""
print "You have " + `py_pjsua.call_get_count()` + " active call(s)"
print ">>>",
# Menu
#
def app_menu():
global g_acc_id
global g_current_call
quit = 0
while quit == 0:
print_menu()
choice = sys.stdin.readline()
if choice[0] == "q":
quit = 1
elif choice[0] == "i":
# Sending IM
print "Send IM to SIP URL: ",
url = sys.stdin.readline()
if url == "\n":
continue
# Send typing indication
py_pjsua.im_typing(g_acc_id, url, 1, None)
print "The content: ",
message = sys.stdin.readline()
if message == "\n":
py_pjsua.im_typing(g_acc_id, url, 0, None)
continue
# Send the IM!
py_pjsua.im_send(g_acc_id, url, None, message, None, 0)
elif choice[0] == "m":
# Make call
print "Using account ", g_acc_id
print "Make call to SIP URL: ",
url = sys.stdin.readline()
url = url.replace("\n", "")
if url == "":
continue
# Initiate the call!
status, call_id = py_pjsua.call_make_call(g_acc_id, url, 0, 0, None)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error making call", status)
else:
g_current_call = call_id
elif choice[0] == "+" and choice[1] == "b":
# Add new buddy
bc = py_pjsua.Buddy_Config()
print "Buddy URL: ",
bc.uri = sys.stdin.readline()
if bc.uri == "\n":
continue
bc.uri = bc.uri.replace("\n", "")
bc.subscribe = 1
status, buddy_id = py_pjsua.buddy_add(bc)
if status != 0:
py_pjsua.perror(THIS_FILE, "Error adding buddy", status)
elif choice[0] == "-" and choice[1] == "b":
print "Enter buddy ID to delete : "
buf = sys.stdin.readline()
buf = buf.replace("\n","")
if buf == "":
continue
i = int(buf)
if py_pjsua.buddy_is_valid(i) == 0:
print "Invalid buddy id " + `i`
else:
py_pjsua.buddy_del(i)
print "Buddy " + `i` + " deleted"
elif choice[0] == "+" and choice[1] == "a":
# Add account
add_account()
elif choice[0] == "-" and choice[1] == "a":
print "Enter account ID to delete : "
buf = sys.stdin.readline()
buf = buf.replace("\n","")
if buf == "":
continue
i = int(buf)
if py_pjsua.acc_is_valid(i) == 0:
print "Invalid account id " + `i`
else:
py_pjsua.acc_del(i)
print "Account " + `i` + " deleted"
elif choice[0] == "+" and choice[1] == "p":
add_player()
elif choice[0] == "+" and choice[1] == "r":
add_recorder()
elif choice[0] == "c" and choice[1] == "l":
conf_list()
elif choice[0] == "c" and choice[1] == "c":
connect_port()
elif choice[0] == "c" and choice[1] == "d":
disconnect_port()
elif choice[0] == "d" and choice[1] == "q":
dump_call_quality()
elif choice[0] == "x":
xfer_call()
elif choice[0] == "X":
xfer_call_replaces()
elif choice[0] == "h":
if g_current_call != py_pjsua.PJSUA_INVALID_ID:
py_pjsua.call_hangup(g_current_call, 603, None, None)
else:
print "No current call"
elif choice[0] == "H":
if g_current_call != py_pjsua.PJSUA_INVALID_ID:
py_pjsua.call_set_hold(g_current_call, None)
else:
print "No current call"
elif choice[0] == "v":
if g_current_call != py_pjsua.PJSUA_INVALID_ID:
                py_pjsua.call_reinvite(g_current_call, 1, None)
else:
print "No current call"
elif choice[0] == "#":
if g_current_call == py_pjsua.PJSUA_INVALID_ID:
print "No current call"
elif py_pjsua.call_has_media(g_current_call) == 0:
print "Media is not established yet!"
else:
call = g_current_call
print "DTMF strings to send (0-9*#A-B)"
buf = sys.stdin.readline()
buf = buf.replace("\n", "")
if buf == "":
continue
if call != g_current_call:
print "Call has been disconnected"
continue
status = py_pjsua.call_dial_dtmf(g_current_call, buf)
if status != 0:
py_pjsua.perror(THIS_FILE, "Unable to send DTMF", status);
else:
print "DTMF digits enqueued for transmission"
elif choice[0] == "s":
print "Subscribe presence of (buddy id) : "
buf = sys.stdin.readline()
buf = buf.replace("\n","")
if buf == "":
continue
i = int(buf)
py_pjsua.buddy_subscribe_pres(i, 1)
elif choice[0] == "u":
print "Unsubscribe presence of (buddy id) : "
buf = sys.stdin.readline()
buf = buf.replace("\n","")
if buf == "":
continue
i = int(buf)
py_pjsua.buddy_subscribe_pres(i, 0)
elif choice[0] == "t":
acc_info = py_pjsua.acc_get_info(g_acc_id)
if acc_info.online_status == 0:
acc_info.online_status = 1
else:
acc_info.online_status = 0
py_pjsua.acc_set_online_status(g_acc_id, acc_info.online_status)
st = ""
if acc_info.online_status == 0:
st = "offline"
else:
st = "online"
print "Setting " + acc_info.acc_uri + " online status to " + st
elif choice[0] == "r":
if choice[1] == "r":
py_pjsua.acc_set_registration(g_acc_id, 1)
elif choice[1] == "u":
py_pjsua.acc_set_registration(g_acc_id, 0)
elif choice[0] == "d":
py_pjsua.dump(choice[1] == "d")
elif choice[0] == "a":
if g_current_call != py_pjsua.PJSUA_INVALID_ID:
py_pjsua.call_answer(g_current_call, 200, None, None)
else:
print "No current call"
#
# main
#
app_init()
app_start()
app_menu()
#
# Done, quitting..
#
print "PJSUA shutting down.."
C_QUIT = 1
# Give the worker thread chance to quit itself
while C_QUIT != 2:
py_pjsua.handle_events(50)
print "PJSUA destroying.."
py_pjsua.destroy()
#General Function:
def combine_funcs(*funcs):
def combined_func(*args, **kwargs):
for f in funcs:
f(*args, **kwargs)
return combined_func
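# For example (an illustrative note, not part of the original app):
# combine_funcs(sub2, start) builds the command for the "Return To
# MainMenu" buttons below -- it first tears down the current frame (sub2)
# and then redraws the main menu (start).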
#Defining the create function:
def create():
    def sub_create():
        def sub1():
            def sub2():
                f1.destroy()
            b=TextArea.get("1.0",'end')
            if b.strip():  # only save a non-empty entry
                f.write(b)
                f.write("\n")
            f.close()
            f2.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Congratulations! Your Entry Is Made.",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true")
b1=tkinter.Button(f1,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
b1.pack(side="right")
a=e.get()
a=a.upper()
f1.destroy()
f=open(a,"w")
f2=tkinter.Frame(bg="#CCCCFF")
f2.pack(fill="both",expand="true")
l1=tkinter.Label(f2,text="Make An Entry:\n\nExample: Item1 Rs amount\nItem2 Rs amount",bg="#FFFFFF",fg="#680000",relief="groove",anchor="n",pady=50,font=25)
l1.pack(fill="x",expand="true")
TextArea = tkinter.Text(f2)
ScrollBar = tkinter.Scrollbar(f2,bg="white")
ScrollBar.config(command=TextArea.yview)
TextArea.config(yscrollcommand=ScrollBar.set)
ScrollBar.pack(side="right", fill="y")
TextArea.pack(fill="x")
b1=tkinter.Button(f2,text="Submit",bg="#FFFFFF",fg="blue",relief="groove",command=sub1)
b1.pack()
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.35,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Make Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_create,height=2,width=10)
b1.place(relx=0.45,rely=0.56)
#Defining the get function:
def get():
    def sub_get():
        def sub2():
            f2.destroy()
        a=e.get()
        a=a.upper()
        f1.destroy()
        f2=tkinter.Frame()
        f2.pack(fill="both",expand="true")
        try:
            f=open(a,'r')
            l1=tkinter.Label(f2,text="Your Entries For The Date "+a+" Are: ",bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50)
            l1.pack(fill="x",expand="true")
            TextArea = tkinter.Text(f2)
            ScrollBar = tkinter.Scrollbar(f2)
            ScrollBar.config(command=TextArea.yview)
            ScrollBar.pack(side="right", fill="y")
            TextArea.insert('insert',f.read())
            f.close()
            TextArea.config(yscrollcommand=ScrollBar.set,state="disabled")
            TextArea.pack(fill="both",expand="true")
        except IOError:
            l1=tkinter.Label(f2,text="No Entry Was Made On "+a,bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50)
            l1.pack(fill="x",expand="true")
        b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
        b1.pack(side="right")
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.35,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Get Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_get,height=2,width=10)
b1.place(relx=0.45,rely=0.56)
#Defining the get entry by month function:
def getm():
def sub_getm():
def sub2():
f2.destroy()
a=e.get()
a=a.upper()
f1.destroy()
f2=tkinter.Frame()
f2.pack(fill="both",expand="true")
l1=tkinter.Label(f2,text="Your Entries For The Month Are: ",bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50)
l1.pack(fill="x",expand="true")
TextArea = tkinter.Text(f2)
ScrollBar = tkinter.Scrollbar(f2)
ScrollBar.config(command=TextArea.yview)
ScrollBar.pack(side="right", fill="y")
for i in range(1,32):
try:
f=open(str(i)+' '+a,'r')
except IOError:
continue
TextArea.insert('insert',"\n"+str(i)+' '+a+": \n\n"+f.read())
TextArea.config(yscrollcommand=ScrollBar.set,state="disabled")
TextArea.pack(fill="both",expand="true")
b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
b1.pack(side="right")
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Month And Year:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.3,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Get Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_getm,height=2,width=10)
b1.place(relx=0.47,rely=0.56)
#Get Total Expense By Date:
def Sum():
    def sub_Sum():
        def sub2():
            f2.destroy()
        t=e.get()
        t=t.upper()
        f1.destroy()
        f2=tkinter.Frame()
        f2.pack(fill="both",expand="true")
        try:
            f=open(t,'r')
            a=f.readlines()
            c=0
            for i in range(0,len(a)):
                b=''
                # Walk each line backwards collecting the trailing amount
                # digits ("Item1 Rs 120" -> "021"), then reverse and add.
                for j in range(len(a[i])-1,-1,-1):
                    if a[i][j].isdigit():
                        b=b+a[i][j]
                    elif b!='':
                        break
                if(b!=''):
                    c=c+int(b[::-1])
            f.close()
            l1=tkinter.Label(f2,text="Total Expenses On "+t+" Are: \nRs "+str(c),bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50,font=50)
            l1.pack(fill="both",expand="true")
        except IOError:
            l1=tkinter.Label(f2,text="No Entry Was Made On "+t,bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50,font=50)
            l1.pack(fill="both",expand="true")
        b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
        b1.pack(side="right")
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.35,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Get Expenses",bg="#9999FF",fg="#660099",relief="groove",command=sub_Sum,height=2,width=10)
b1.place(relx=0.47,rely=0.56)
#Defining Get total expense in a month:
def Summ():
    def sub_Summ():
        def sub2():
            f2.destroy()
        a=e.get()
        a=a.upper()
        c=0
        # Add up the entries of every day file of the month
        # ("1 MONTH YEAR" .. "31 MONTH YEAR").
        for day in range(1,32):
            try:
                f=open(str(day)+' '+a,'r')
            except IOError:
                continue
            t=f.readlines()
            for i in range(0,len(t)):
                b=''
                for j in range(len(t[i])-1,-1,-1):
                    if t[i][j].isdigit():
                        b=b+t[i][j]
                    elif b!='':
                        break
                if(b!=''):
                    c=c+int(b[::-1])
            f.close()
f1.destroy()
f2=tkinter.Frame()
f2.pack(fill="both",expand="true")
l1=tkinter.Label(f2,text="Total Expenses In "+a+" Are: \nRs "+str(c),bg="#FFFFFF",fg="#680000",relief="groove",anchor="center",pady=50,font=50)
l1.pack(fill="both",expand="true")
b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
b1.pack(side="right")
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Month And Year:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.3,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Get Expenses",bg="#9999FF",fg="#660099",relief="groove",command=sub_Summ,height=2,width=10)
b1.place(relx=0.47,rely=0.56)
# Defining Add to entry:
def add():
    def sub_add():
        def sub1():
            def sub2():
                f1.destroy()
            b=TextArea.get("1.0",'end')
            if b.strip():  # only append a non-empty entry
                f.write(b)
                f.write("\n")
            f.close()
            f2.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Congratulations! Your Entry Is Made.",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true")
b1=tkinter.Button(f1,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
b1.pack(side="right")
a=e.get()
a=a.upper()
f1.destroy()
f=open(a,"a")
f2=tkinter.Frame(bg="#CCCCFF")
f2.pack(fill="both",expand="true")
l1=tkinter.Label(f2,text="Make An Entry:\n\nExample: Item1 Rs amount\nItem2 Rs amount",bg="#FFFFFF",fg="#680000",relief="groove",anchor="n",pady=50,font=25)
l1.pack(fill="x",expand="true")
TextArea = tkinter.Text(f2)
ScrollBar = tkinter.Scrollbar(f2,bg="white")
ScrollBar.config(command=TextArea.yview)
TextArea.config(yscrollcommand=ScrollBar.set)
ScrollBar.pack(side="right", fill="y")
TextArea.pack(fill="x")
b1=tkinter.Button(f2,text="Submit",bg="#FFFFFF",fg="blue",relief="groove",command=sub1)
b1.pack()
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.35,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Make Entry",bg="#9999FF",fg="#660099",relief="groove",command=sub_add,height=2,width=10)
b1.place(relx=0.45,rely=0.56)
# Defining Delete Existing Entry:
def delete():
    import os
    def sub_delete():
        def sub2():
            f2.destroy()
        a=e.get()
        a=a.upper()
        try:
            os.remove(a)
        except OSError:
            pass  # no entry file exists for that date
        f1.destroy()
f2=tkinter.Frame(bg="#CCCCFF")
f2.pack(fill="both",expand="true")
l1=tkinter.Label(f2,text="Congratulations! Your Entry Is Deleted.",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true")
b1=tkinter.Button(f2,text="Return To MainMenu",bg="#FFFFFF",fg="blue",relief="groove",command=combine_funcs(sub2,start),height=2,width=25)
b1.pack(side="right")
frame.destroy()
f1=tkinter.Frame(bg="#CCCCFF")
f1.pack(fill="both",expand="true")
l1=tkinter.Label(f1,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(f1,text="Enter Date:",bg="#330033",fg="#FFFFFF",relief="groove",font=9,padx=50)
l1.place(relx=0.35,rely=0.5)
e=tkinter.Entry(f1)
e.place(relx=0.52,rely=0.5)
b1=tkinter.Button(f1,text="Delete",bg="#9999FF",fg="#660099",relief="groove",command=sub_delete,height=2,width=10)
b1.place(relx=0.47,rely=0.56)
import tkinter
root=tkinter.Tk()
root.geometry('1250x700')
def start():
global frame
frame=tkinter.Frame(bg="#CCCCFF")
frame.pack(fill="both",expand="true",)
l1=tkinter.Label(frame,text="Welcome To Expense Manager",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="n")
l1=tkinter.Label(frame,text="Press To Create A new Entry",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16,padx=16)
l1.place(relx=0.2,rely=0.2,anchor="n")
b1=tkinter.Button(frame,text="Create Entry Sheet",bg="#9999FF",fg="#660099",relief="solid",command=create,font=11,pady=16,cursor="dot")
b1.place(relx=0.2,rely=0.3,anchor="n")
l1=tkinter.Label(frame,text="Press To Get Entry By Date",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16,padx=16)
l1.place(relx=0.4,rely=0.2,anchor="n")
b1=tkinter.Button(frame,text="Get Entry Sheet",bg="#9999FF",fg="#660099",relief="solid",command=get,font=11,pady=16,cursor="dot")
b1.place(relx=0.4,rely=0.3,anchor="n")
l1=tkinter.Label(frame,text="Press To Get Entry By Month",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16,padx=7)
l1.place(relx=0.6,rely=0.2,anchor="n")
b1=tkinter.Button(frame,text="Get Monthly Entry Sheet",bg="#9999FF",fg="#660099",relief="solid",command=getm,font=11,pady=16,cursor="dot")
b1.place(relx=0.6,rely=0.3,anchor="n")
l1=tkinter.Label(frame,text="Press To Get Total Expense On Date",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16)
l1.place(relx=0.8,rely=0.2,anchor="n")
b1=tkinter.Button(frame,text="Get Total Expense On Date",bg="#9999FF",fg="#660099",relief="solid",command=Sum,font=11,pady=16,cursor="dot")
b1.place(relx=0.8,rely=0.3,anchor="n")
l1=tkinter.Label(frame,text="Press To Get Total Expense In A Month",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16)
l1.place(relx=0.3,rely=0.5,anchor="n")
b1=tkinter.Button(frame,text="Get Total Expense",bg="#9999FF",fg="#660099",relief="solid",command=Summ,font=11,pady=16,cursor="dot")
b1.place(relx=0.3,rely=0.6,anchor="n")
l1=tkinter.Label(frame,text="Press To Add To Existing Entry",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16)
l1.place(relx=0.51,rely=0.5,anchor="n")
b1=tkinter.Button(frame,text="Add To Entry",bg="#9999FF",fg="#660099",relief="solid",command=add,font=11,pady=16,cursor="dot")
b1.place(relx=0.51,rely=0.6,anchor="n")
l1=tkinter.Label(frame,text="Press To Delete Existing Entry",bg="#330066",fg="#66FFFF",relief="groove",pady=15,font=16)
l1.place(relx=0.7,rely=0.5,anchor="n")
b1=tkinter.Button(frame,text="Delete Entry",bg="#9999FF",fg="#660099",relief="solid",command=delete,font=11,pady=16,cursor="dot")
b1.place(relx=0.7,rely=0.6,anchor="n")
l1=tkinter.Label(frame,text="\u00a9"+" copyright 2016\t\t\t\t\t\t\t\t\t\t\t\t"+"Developed By: Akshit Grover",bg="#330033",fg="#FFFFFF",relief="groove",pady=30,font=16)
l1.pack(fill="x",expand="true",anchor="s")
start()
root.mainloop()
#!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2013 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
"""
Collector for PostgreSQL.
Please, set login/password at etc/postgresql.conf .
Collector uses socket file for DB connection so set 'unix_socket_directory'
at postgresql.conf .
"""
import sys
import os
import time
import socket
import errno
try:
import psycopg2
except ImportError:
psycopg2 = None # handled in main()
COLLECTION_INTERVAL = 15 # seconds
CONNECT_TIMEOUT = 2 # seconds
from collectors.lib import utils
from collectors.etc import postgresqlconf
# Directories under which to search socket files
SEARCH_DIRS = frozenset([
"/var/run/postgresql", # Debian default
"/var/pgsql_socket", # MacOS default
"/usr/local/var/postgres", # custom compilation
"/tmp", # custom compilation
])
def find_sockdir():
    """Returns a path to the PostgreSQL socket file to monitor."""
    for dirname in SEARCH_DIRS:
        for dirpath, dirnames, dirfiles in os.walk(dirname, followlinks=True):
            for name in dirfiles:
                # ensure selection of PostgreSQL socket only
                if (utils.is_sockfile(os.path.join(dirpath, name))
                        and "PGSQL" in name):
                    return dirpath
def postgres_connect(sockdir):
"""Connects to the PostgreSQL server using the specified socket file."""
user, password = postgresqlconf.get_user_password()
try:
return psycopg2.connect("host='%s' user='%s' password='%s' "
"connect_timeout='%s' dbname=postgres"
% (sockdir, user, password,
CONNECT_TIMEOUT))
except (EnvironmentError, EOFError, RuntimeError, socket.error), e:
utils.err("Couldn't connect to DB :%s" % (e))
def collect(db):
"""
Collects and prints stats.
Here we collect only general info, for full list of data for collection
see http://www.postgresql.org/docs/9.2/static/monitoring-stats.html
"""
try:
cursor = db.cursor()
        # general statistics
cursor.execute("SELECT pg_stat_database.*, pg_database_size"
" (pg_database.datname) AS size FROM pg_database JOIN"
" pg_stat_database ON pg_database.datname ="
" pg_stat_database.datname WHERE pg_stat_database.datname"
" NOT IN ('template0', 'template1', 'postgres')")
ts = time.time()
stats = cursor.fetchall()
# datid | datname | numbackends | xact_commit | xact_rollback | blks_read | blks_hit | tup_returned | tup_fetched | tup_inserted | tup_updated | tup_deleted | conflicts | temp_files | temp_bytes | deadlocks | blk_read_time | blk_write_time | stats_reset | size
result = {}
for stat in stats:
database = stat[1]
result[database] = stat
for database in result:
for i in range(2,len(cursor.description)):
metric = cursor.description[i].name
value = result[database][i]
                try:
                    # skip non-numeric timestamp columns such as stats_reset
                    if metric == "stats_reset":
                        continue
                    print ("postgresql.%s %i %s database=%s"
                           % (metric, ts, value, database))
                except Exception:
                    utils.err("error: failed to print metric %s" % metric)
                    continue
# connections
cursor.execute("SELECT datname, count(datname) FROM pg_stat_activity"
" GROUP BY pg_stat_activity.datname")
ts = time.time()
connections = cursor.fetchall()
for database, connection in connections:
print ("postgresql.connections %i %s database=%s"
% (ts, connection, database))
except (EnvironmentError, EOFError, RuntimeError, socket.error), e:
if isinstance(e, IOError) and e[0] == errno.EPIPE:
# exit on a broken pipe. There is no point in continuing
# because no one will read our stdout anyway.
return 2
utils.err("error: failed to collect data: %s" % e)
def main(args):
"""Collects and dumps stats from a PostgreSQL server."""
if psycopg2 is None:
utils.err("error: Python module 'psycopg2' is missing")
return 13 # Ask tcollector to not respawn us
sockdir = find_sockdir()
if not sockdir: # Nothing to monitor
utils.err("error: Can't find postgresql socket file")
return 13 # Ask tcollector to not respawn us
db = postgres_connect(sockdir)
    db.autocommit = True
while True:
collect(db)
sys.stdout.flush()
time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
sys.stdin.close()
sys.exit(main(sys.argv))
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.tempest import exceptions
from neutron.tests.tempest import test
AGENT_TYPE = 'L3 agent'
AGENT_MODES = (
'legacy',
'dvr_snat'
)
class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):
    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        List routers that the given L3 agent is hosting.
        List L3 agents hosting the given router.
        Add a router to an L3 agent and remove it again.

    v2.0 of the Neutron API is assumed.
    The l3_agent_scheduler extension is required for these tests.
    """
    _agent_mode = 'legacy'
@classmethod
def skip_checks(cls):
super(L3AgentSchedulerTestJSON, cls).skip_checks()
if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
msg = "L3 Agent Scheduler Extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(L3AgentSchedulerTestJSON, cls).resource_setup()
body = cls.admin_client.list_agents()
agents = body['agents']
for agent in agents:
# TODO(armax): falling back on default _agent_mode can be
# dropped as soon as Icehouse is dropped.
agent_mode = (
agent['configurations'].get('agent_mode', cls._agent_mode))
if agent['agent_type'] == AGENT_TYPE and agent_mode in AGENT_MODES:
cls.agent = agent
break
else:
msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
raise exceptions.InvalidConfiguration(msg)
cls.router = cls.create_router(data_utils.rand_name('router'))
# NOTE(armax): If DVR is an available extension, and the created router
# is indeed a distributed one, more resources need to be provisioned
# in order to bind the router to the L3 agent.
# That said, let's preserve the existing test logic, where the extra
# query and setup steps are only required if the extension is available
# and only if the router's default type is distributed.
if test.is_extension_enabled('dvr', 'network'):
is_dvr_router = cls.admin_client.show_router(
cls.router['id'])['router'].get('distributed', False)
if is_dvr_router:
cls.network = cls.create_network()
cls.create_subnet(cls.network)
cls.port = cls.create_port(cls.network)
cls.client.add_router_interface_with_port_id(
cls.router['id'], cls.port['id'])
@test.attr(type='smoke')
@test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
def test_list_routers_on_l3_agent(self):
self.admin_client.list_routers_on_l3_agent(self.agent['id'])
@test.attr(type='smoke')
@test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
def test_add_list_remove_router_on_l3_agent(self):
l3_agent_ids = list()
self.admin_client.add_router_to_l3_agent(
self.agent['id'],
self.router['id'])
body = (
self.admin_client.list_l3_agents_hosting_router(self.router['id']))
for agent in body['agents']:
l3_agent_ids.append(agent['id'])
self.assertIn('agent_type', agent)
self.assertEqual('L3 agent', agent['agent_type'])
self.assertIn(self.agent['id'], l3_agent_ids)
body = self.admin_client.remove_router_from_l3_agent(
self.agent['id'],
self.router['id'])
        # NOTE(afazekas): The deletion is not asserted, because neutron
        # is not forbidden from rescheduling the router to the same agent
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.safe_eval import safe_eval
class base_config_settings(osv.TransientModel):
_inherit = 'base.config.settings'
_columns = {
'auth_signup_reset_password': fields.boolean('Enable password reset from Login page',
help="This allows users to trigger a password reset from the Login page."),
'auth_signup_uninvited': fields.boolean('Allow external users to sign up',
help="If unchecked, only invited users may sign up."),
'auth_signup_template_user_id': fields.many2one('res.users',
string='Template user for new users created through signup'),
}
def get_default_auth_signup_template_user_id(self, cr, uid, fields, context=None):
icp = self.pool.get('ir.config_parameter')
# we use safe_eval on the result, since the value of the parameter is a nonempty string
return {
'auth_signup_reset_password': safe_eval(icp.get_param(cr, uid, 'auth_signup.reset_password', 'False')),
'auth_signup_uninvited': safe_eval(icp.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')),
'auth_signup_template_user_id': safe_eval(icp.get_param(cr, uid, 'auth_signup.template_user_id', 'False')),
}
def set_auth_signup_template_user_id(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool.get('ir.config_parameter')
# we store the repr of the values, since the value of the parameter is a required string
icp.set_param(cr, uid, 'auth_signup.reset_password', repr(config.auth_signup_reset_password))
icp.set_param(cr, uid, 'auth_signup.allow_uninvited', repr(config.auth_signup_uninvited))
icp.set_param(cr, uid, 'auth_signup.template_user_id', repr(config.auth_signup_template_user_id.id))
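        # For illustration (hypothetical values, not from the original
        # module): enabling both checkboxes with template user id 5 stores
        # the strings 'True', 'True' and '5', which the getter above turns
        # back into Python values via safe_eval.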
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook contrasts.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Contrasts Overview
import numpy as np
import statsmodels.api as sm
# This document is based heavily on this excellent resource from UCLA
# http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm
# A categorical variable of K categories, or levels, usually enters a
# regression as a sequence of K-1 dummy variables. This amounts to a linear
# hypothesis on the level means. That is, each test statistic for these
# variables amounts to testing whether the mean for that level is
# statistically significantly different from the mean of the base category.
# This dummy coding is called Treatment coding in R parlance, and we will
# follow this convention. There are, however, different coding methods that
# amount to different sets of linear hypotheses.
#
# In fact, the dummy coding is not technically a contrast coding. This is
# because the dummy variables add to one and are not functionally
# independent of the model's intercept. On the other hand, a set of
# *contrasts* for a categorical variable with `k` levels is a set of `k-1`
# functionally independent linear combinations of the factor level means
# that are also independent of the sum of the dummy variables. The dummy
# coding is not wrong *per se*. It captures all of the coefficients, but it
# complicates matters when the model assumes independence of the
# coefficients such as in ANOVA. Linear regression models do not assume
# independence of the coefficients and thus dummy coding is often the only
# coding that is taught in this context.
#
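# As a quick numeric check of this distinction (a sketch added for
# illustration; K=4 is arbitrary): treatment dummy columns sum to one,
# while proper contrast columns sum to zero.
dummies = np.eye(4)[:, 1:]  # K-1 treatment dummies for K=4 levels
print(dummies.sum(axis=0))  # [1. 1. 1.] -- not independent of the intercept
sum_contrast = np.vstack([np.eye(3), -np.ones((1, 3))])
print(sum_contrast.sum(axis=0))  # [0. 0. 0.] -- a valid set of contrasts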
# To have a look at the contrast matrices in Patsy, we will use data from
# UCLA ATS. First let's load the data.
# #### Example Data
import pandas as pd
url = 'https://stats.idre.ucla.edu/stat/data/hsb2.csv'
hsb2 = pd.read_table(url, delimiter=",")
hsb2.head(10)
# It will be instructive to look at the mean of the dependent variable,
# write, for each level of race (1 = Hispanic, 2 = Asian, 3 = African
# American and 4 = Caucasian).
hsb2.groupby('race')['write'].mean()
# #### Treatment (Dummy) Coding
# Dummy coding is likely the most well known coding scheme. It compares
# each level of the categorical variable to a base reference level. The base
# reference level is the value of the intercept. It is the default contrast
# in Patsy for unordered categorical factors. The Treatment contrast matrix
# for race would be
from patsy.contrasts import Treatment
levels = [1, 2, 3, 4]
contrast = Treatment(reference=0).code_without_intercept(levels)
print(contrast.matrix)
# Here we used `reference=0`, which implies that the first level,
# Hispanic, is the reference category against which the other level effects
# are measured. As mentioned above, the columns do not sum to zero and are
# thus not independent of the intercept. To be explicit, let's look at how
# this would encode the `race` variable.
hsb2.race.head(10)
print(contrast.matrix[hsb2.race - 1, :][:20])
sm.categorical(hsb2.race.values)
# This is a bit of a trick, as the `race` category conveniently maps to
# zero-based indices. If it does not, this conversion happens under the
# hood, so this will not work in general but nonetheless is a useful exercise
# to fix ideas. The below illustrates the output using the three contrasts
# above
from statsmodels.formula.api import ols
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
print(res.summary())
# We explicitly gave the contrast for race; however, since Treatment is
# the default, we could have omitted this.
# ### Simple Coding
# Like Treatment Coding, Simple Coding compares each level to a fixed
# reference level. However, with simple coding, the intercept is the grand
# mean of all the levels of the factors. Patsy does not have the Simple
# contrast included, but you can easily define your own contrasts. To do so,
# write a class that contains a code_with_intercept and a
# code_without_intercept method that returns a patsy.contrast.ContrastMatrix
# instance
from patsy.contrasts import ContrastMatrix
def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]
class Simple(object):
def _simple_contrast(self, levels):
nlevels = len(levels)
contr = -1. / nlevels * np.ones((nlevels, nlevels - 1))
contr[1:][np.diag_indices(nlevels - 1)] = (nlevels - 1.) / nlevels
return contr
def code_with_intercept(self, levels):
contrast = np.column_stack((np.ones(len(levels)),
self._simple_contrast(levels)))
return ContrastMatrix(contrast, _name_levels("Simp.", levels))
def code_without_intercept(self, levels):
contrast = self._simple_contrast(levels)
return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1]))
hsb2.groupby('race')['write'].mean().mean()
contrast = Simple().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Simple)", data=hsb2)
res = mod.fit()
print(res.summary())
# ### Sum (Deviation) Coding
# Sum coding compares the mean of the dependent variable for a given level
# to the overall mean of the dependent variable over all the levels. That
# is, it uses contrasts between each of the first k-1 levels and level k. In
# this example, level 1 is compared to all the others, level 2 to all the
# others, and level 3 to all the others.
from patsy.contrasts import Sum
contrast = Sum().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Sum)", data=hsb2)
res = mod.fit()
print(res.summary())
# This corresponds to a parameterization that forces all the coefficients
# to sum to zero. Notice that the intercept here is the grand mean where the
# grand mean is the mean of means of the dependent variable by each level.
hsb2.groupby('race')['write'].mean().mean()
# ### Backward Difference Coding
# In backward difference coding, the mean of the dependent variable for a
# level is compared with the mean of the dependent variable for the prior
# level. This type of coding may be useful for a nominal or an ordinal
# variable.
from patsy.contrasts import Diff
contrast = Diff().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Diff)", data=hsb2)
res = mod.fit()
print(res.summary())
# For example, here the coefficient on level 1 is the mean of `write` at
# level 2 compared with the mean at level 1, i.e.,
res.params["C(race, Diff)[D.1]"]
hsb2.groupby('race').mean()["write"][2] - hsb2.groupby(
'race').mean()["write"][1]
# ### Helmert Coding
# Our version of Helmert coding is sometimes referred to as Reverse
# Helmert Coding. The mean of the dependent variable for a level is compared
# to the mean of the dependent variable over all previous levels. Hence, the
# name 'reverse' being sometimes applied to differentiate from forward
# Helmert coding. This comparison does not make much sense for a nominal
# variable such as race, but we would use the Helmert contrast like so:
from patsy.contrasts import Helmert
contrast = Helmert().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Helmert)", data=hsb2)
res = mod.fit()
print(res.summary())
# To illustrate, the comparison on level 4 is the mean of the dependent
# variable at the previous three levels taken from the mean at level 4
grouped = hsb2.groupby('race')
grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean()
# As you can see, these are only equal up to a constant. Other versions of
# the Helmert contrast give the actual difference in means. Regardless, the
# hypothesis tests are the same.
k = 4
1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean())
k = 3
1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean())
# ### Orthogonal Polynomial Coding
# The coefficients taken on by polynomial coding for `k=4` levels are the
# linear, quadratic, and cubic trends in the categorical variable. The
# categorical variable here is assumed to be represented by an underlying,
# equally spaced numeric variable. Therefore, this type of encoding is used
# only for ordered categorical variables with equal spacing. In general, the
# polynomial contrast produces polynomials of order `k-1`. Since `race` is
# not an ordered factor variable let's use `read` as an example. First we
# need to create an ordered categorical from `read`.
hsb2['readcat'] = np.asarray(pd.cut(hsb2.read, bins=3))
hsb2.groupby('readcat').mean()['write']
from patsy.contrasts import Poly
levels = hsb2.readcat.unique().tolist()
contrast = Poly().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(readcat, Poly)", data=hsb2)
res = mod.fit()
print(res.summary())
# As you can see, readcat has a significant linear effect on the dependent
# variable `write` but not a significant quadratic or cubic effect.
from __future__ import absolute_import, division, print_function
import pytest
import sys
from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup
from _pytest.runner import runtestprotocol
class TestEvaluator(object):
def test_no_marker(self, testdir):
item = testdir.getitem("def test_func(): pass")
evalskipif = MarkEvaluator(item, "skipif")
assert not evalskipif
assert not evalskipif.istrue()
def test_marked_no_args(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == ""
assert not ev.get("run", False)
def test_marked_one_arg(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
@pytest.mark.skipif("sys.version_info[0] >= 3")
def test_marked_one_arg_unicode(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz(u"hasattr(os, 'sep')")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
def test_marked_one_arg_with_reason(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "hello world"
assert ev.get("attr") == 2
def test_marked_one_arg_twice(self, testdir):
lines = [
"""@pytest.mark.skipif("not hasattr(os, 'murks')")""",
"""@pytest.mark.skipif("hasattr(os, 'murks')")""",
]
for i in range(0, 2):
item = testdir.getitem(
"""
import pytest
%s
%s
def test_func():
pass
"""
% (lines[i], lines[(i + 1) % 2])
)
ev = MarkEvaluator(item, "skipif")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
def test_marked_one_arg_twice2(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.skipif("hasattr(os, 'murks')")
@pytest.mark.skipif("not hasattr(os, 'murks')")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "skipif")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
def test_marked_skip_with_not_string(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.skipif(False)
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "skipif")
exc = pytest.raises(pytest.fail.Exception, ev.istrue)
assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg
def test_skipif_class(self, testdir):
item, = testdir.getitems(
"""
import pytest
class TestClass(object):
pytestmark = pytest.mark.skipif("config._hackxyz")
def test_func(self):
pass
"""
)
item.config._hackxyz = 3
ev = MarkEvaluator(item, "skipif")
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: config._hackxyz"
class TestXFail(object):
@pytest.mark.parametrize("strict", [True, False])
def test_xfail_simple(self, testdir, strict):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xfail(strict=%s)
def test_func():
assert 0
"""
% strict
)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.skipped
assert callreport.wasxfail == ""
def test_xfail_xpassed(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xfail(reason="this is an xfail")
def test_func():
assert 1
"""
)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.passed
assert callreport.wasxfail == "this is an xfail"
def test_xfail_using_platform(self, testdir):
"""
Verify that platform can be used with xfail statements.
"""
item = testdir.getitem(
"""
import pytest
@pytest.mark.xfail("platform.platform() == platform.platform()")
def test_func():
assert 0
"""
)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.wasxfail
def test_xfail_xpassed_strict(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xfail(strict=True, reason="nope")
def test_func():
assert 1
"""
)
reports = runtestprotocol(item, log=False)
assert len(reports) == 3
callreport = reports[1]
assert callreport.failed
assert callreport.longrepr == "[XPASS(strict)] nope"
assert not hasattr(callreport, "wasxfail")
def test_xfail_run_anyway(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
def test_func2():
pytest.xfail("hello")
"""
)
result = testdir.runpytest("--runxfail")
result.stdout.fnmatch_lines(
["*def test_func():*", "*assert 0*", "*1 failed*1 pass*"]
)
def test_xfail_evalfalse_but_fails(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xfail('False')
def test_func():
assert 0
"""
)
reports = runtestprotocol(item, log=False)
callreport = reports[1]
assert callreport.failed
assert not hasattr(callreport, "wasxfail")
assert "xfail" in callreport.keywords
def test_xfail_not_report_default(self, testdir):
p = testdir.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail
def test_this():
assert 0
"""
)
testdir.runpytest(p, "-v")
# result.stdout.fnmatch_lines([
# "*HINT*use*-r*"
# ])
def test_xfail_not_run_xfail_reporting(self, testdir):
p = testdir.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="noway")
def test_this():
assert 0
@pytest.mark.xfail("True", run=False)
def test_this_true():
assert 0
@pytest.mark.xfail("False", run=False, reason="huh")
def test_this_false():
assert 1
"""
)
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines(
[
"*test_one*test_this*",
"*NOTRUN*noway",
"*test_one*test_this_true*",
"*NOTRUN*condition:*True*",
"*1 passed*",
]
)
def test_xfail_not_run_no_setup_run(self, testdir):
p = testdir.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail(run=False, reason="hello")
def test_this():
assert 0
def setup_module(mod):
raise ValueError(42)
"""
)
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines(
["*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*"]
)
def test_xfail_xpass(self, testdir):
p = testdir.makepyfile(
test_one="""
import pytest
@pytest.mark.xfail
def test_that():
assert 1
"""
)
result = testdir.runpytest(p, "-rX")
result.stdout.fnmatch_lines(["*XPASS*test_that*", "*1 xpassed*"])
assert result.ret == 0
def test_xfail_imperative(self, testdir):
p = testdir.makepyfile(
"""
import pytest
def test_this():
pytest.xfail("hello")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 xfailed*"])
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
result = testdir.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines("*1 pass*")
def test_xfail_imperative_in_setup_function(self, testdir):
p = testdir.makepyfile(
"""
import pytest
def setup_function(function):
pytest.xfail("hello")
def test_this():
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 xfailed*"])
result = testdir.runpytest(p, "-rx")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
result = testdir.runpytest(p, "--runxfail")
result.stdout.fnmatch_lines(
"""
*def test_this*
*1 fail*
"""
)
def xtest_dynamic_xfail_set_during_setup(self, testdir):
p = testdir.makepyfile(
"""
import pytest
def setup_function(function):
pytest.mark.xfail(function)
def test_this():
assert 0
def test_that():
assert 1
"""
)
result = testdir.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*XPASS*test_that*"])
def test_dynamic_xfail_no_run(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture
def arg(request):
request.applymarker(pytest.mark.xfail(run=False))
def test_this(arg):
assert 0
"""
)
result = testdir.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*NOTRUN*"])
def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.fixture
def arg(request):
request.applymarker(pytest.mark.xfail)
def test_this2(arg):
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 xfailed*"])
@pytest.mark.parametrize(
"expected, actual, matchline",
[
("TypeError", "TypeError", "*1 xfailed*"),
("(AttributeError, TypeError)", "TypeError", "*1 xfailed*"),
("TypeError", "IndexError", "*1 failed*"),
("(AttributeError, TypeError)", "IndexError", "*1 failed*"),
],
)
def test_xfail_raises(self, expected, actual, matchline, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(raises=%s)
def test_raises():
raise %s()
"""
% (expected, actual)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([matchline])
def test_strict_sanity(self, testdir):
"""sanity check for xfail(strict=True): a failing test should behave
exactly like a normal xfail.
"""
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=True)
def test_foo():
assert 0
"""
)
result = testdir.runpytest(p, "-rxX")
result.stdout.fnmatch_lines(["*XFAIL*", "*unsupported feature*"])
assert result.ret == 0
@pytest.mark.parametrize("strict", [True, False])
def test_strict_xfail(self, testdir, strict):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(reason='unsupported feature', strict=%s)
def test_foo():
with open('foo_executed', 'w'): pass # make sure test executes
"""
% strict
)
result = testdir.runpytest(p, "-rxX")
if strict:
result.stdout.fnmatch_lines(
["*test_foo*", "*XPASS(strict)*unsupported feature*"]
)
else:
result.stdout.fnmatch_lines(
[
"*test_strict_xfail*",
"XPASS test_strict_xfail.py::test_foo unsupported feature",
]
)
assert result.ret == (1 if strict else 0)
assert testdir.tmpdir.join("foo_executed").isfile()
@pytest.mark.parametrize("strict", [True, False])
def test_strict_xfail_condition(self, testdir, strict):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
def test_foo():
pass
"""
% strict
)
result = testdir.runpytest(p, "-rxX")
result.stdout.fnmatch_lines("*1 passed*")
assert result.ret == 0
@pytest.mark.parametrize("strict", [True, False])
def test_xfail_condition_keyword(self, testdir, strict):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(condition=False, reason='unsupported feature', strict=%s)
def test_foo():
pass
"""
% strict
)
result = testdir.runpytest(p, "-rxX")
result.stdout.fnmatch_lines("*1 passed*")
assert result.ret == 0
@pytest.mark.parametrize("strict_val", ["true", "false"])
def test_strict_xfail_default_from_file(self, testdir, strict_val):
testdir.makeini(
"""
[pytest]
xfail_strict = %s
"""
% strict_val
)
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(reason='unsupported feature')
def test_foo():
pass
"""
)
result = testdir.runpytest(p, "-rxX")
strict = strict_val == "true"
result.stdout.fnmatch_lines("*1 failed*" if strict else "*1 xpassed*")
assert result.ret == (1 if strict else 0)
class TestXFailwithSetupTeardown(object):
def test_failing_setup_issue9(self, testdir):
testdir.makepyfile(
"""
import pytest
def setup_function(func):
assert 0
@pytest.mark.xfail
def test_func():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 xfail*"])
def test_failing_teardown_issue9(self, testdir):
testdir.makepyfile(
"""
import pytest
def teardown_function(func):
assert 0
@pytest.mark.xfail
def test_func():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 xfail*"])
class TestSkip(object):
def test_skip_class(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip
class TestSomething(object):
def test_foo(self):
pass
def test_bar(self):
pass
def test_baz():
pass
"""
)
rec = testdir.inline_run()
rec.assertoutcome(skipped=2, passed=1)
def test_skips_on_false_string(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip('False')
def test_foo():
pass
"""
)
rec = testdir.inline_run()
rec.assertoutcome(skipped=1)
def test_arg_as_reason(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip('testing stuff')
def test_bar():
pass
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*testing stuff*", "*1 skipped*"])
def test_skip_no_reason(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip
def test_foo():
pass
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"])
def test_skip_with_reason(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip(reason="for lolz")
def test_bar():
pass
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*for lolz*", "*1 skipped*"])
def test_only_skips_marked_test(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip
def test_foo():
pass
@pytest.mark.skip(reason="nothing in particular")
def test_bar():
pass
def test_baz():
assert True
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*nothing in particular*", "*1 passed*2 skipped*"])
def test_strict_and_skip(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skip
def test_hello():
pass
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*unconditional skip*", "*1 skipped*"])
class TestSkipif(object):
def test_skipif_conditional(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.skipif("hasattr(os, 'sep')")
def test_func():
pass
"""
)
x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item))
assert x.value.msg == "condition: hasattr(os, 'sep')"
@pytest.mark.parametrize(
"params", ["\"hasattr(sys, 'platform')\"", 'True, reason="invalid platform"']
)
def test_skipif_reporting(self, testdir, params):
p = testdir.makepyfile(
test_foo="""
import pytest
@pytest.mark.skipif(%(params)s)
def test_that():
assert 0
"""
% dict(params=params)
)
result = testdir.runpytest(p, "-s", "-rs")
result.stdout.fnmatch_lines(["*SKIP*1*test_foo.py*platform*", "*1 skipped*"])
assert result.ret == 0
def test_skipif_using_platform(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.skipif("platform.platform() == platform.platform()")
def test_func():
pass
"""
)
pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item))
@pytest.mark.parametrize(
"marker, msg1, msg2",
[("skipif", "SKIP", "skipped"), ("xfail", "XPASS", "xpassed")],
)
def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):
testdir.makepyfile(
test_foo="""
import pytest
@pytest.mark.{marker}(False, reason='first_condition')
@pytest.mark.{marker}(True, reason='second_condition')
def test_foobar():
assert 1
""".format(
marker=marker
)
)
result = testdir.runpytest("-s", "-rsxX")
result.stdout.fnmatch_lines(
[
"*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
"*1 {msg2}*".format(msg2=msg2),
]
)
assert result.ret == 0
def test_skip_not_report_default(testdir):
p = testdir.makepyfile(
test_one="""
import pytest
def test_this():
pytest.skip("hello")
"""
)
result = testdir.runpytest(p, "-v")
result.stdout.fnmatch_lines(
[
# "*HINT*use*-r*",
"*1 skipped*"
]
)
def test_skipif_class(testdir):
p = testdir.makepyfile(
"""
import pytest
class TestClass(object):
pytestmark = pytest.mark.skipif("True")
def test_that(self):
assert 0
def test_though(self):
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*2 skipped*"])
def test_skip_reasons_folding():
path = "xyz"
lineno = 3
message = "justso"
longrepr = (path, lineno, message)
class X(object):
pass
ev1 = X()
ev1.when = "execute"
ev1.skipped = True
ev1.longrepr = longrepr
ev2 = X()
ev2.when = "execute"
ev2.longrepr = longrepr
ev2.skipped = True
# ev3 might be a collection report
ev3 = X()
ev3.longrepr = longrepr
ev3.skipped = True
values = folded_skips([ev1, ev2, ev3])
assert len(values) == 1
    num, fspath, skipped_lineno, reason = values[0]
    assert num == 3
    assert fspath == path
    assert skipped_lineno == lineno
    assert reason == message
def test_skipped_reasons_functional(testdir):
testdir.makepyfile(
test_one="""
from conftest import doskip
def setup_function(func):
doskip()
def test_func():
pass
class TestClass(object):
def test_method(self):
doskip()
""",
conftest="""
import pytest
def doskip():
pytest.skip('test')
""",
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*SKIP*2*conftest.py:4: test"])
assert result.ret == 0
def test_skipped_folding(testdir):
testdir.makepyfile(
test_one="""
import pytest
pytestmark = pytest.mark.skip("Folding")
def setup_function(func):
pass
def test_func():
pass
class TestClass(object):
def test_method(self):
pass
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(["*SKIP*2*test_one.py: Folding"])
assert result.ret == 0
def test_reportchars(testdir):
testdir.makepyfile(
"""
import pytest
def test_1():
assert 0
@pytest.mark.xfail
def test_2():
assert 0
@pytest.mark.xfail
def test_3():
pass
def test_4():
pytest.skip("four")
"""
)
result = testdir.runpytest("-rfxXs")
result.stdout.fnmatch_lines(
["FAIL*test_1*", "XFAIL*test_2*", "XPASS*test_3*", "SKIP*four*"]
)
def test_reportchars_error(testdir):
testdir.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
""",
test_simple="""
def test_foo():
pass
""",
)
result = testdir.runpytest("-rE")
result.stdout.fnmatch_lines(["ERROR*test_foo*"])
def test_reportchars_all(testdir):
testdir.makepyfile(
"""
import pytest
def test_1():
assert 0
@pytest.mark.xfail
def test_2():
assert 0
@pytest.mark.xfail
def test_3():
pass
def test_4():
pytest.skip("four")
"""
)
result = testdir.runpytest("-ra")
result.stdout.fnmatch_lines(
["FAIL*test_1*", "SKIP*four*", "XFAIL*test_2*", "XPASS*test_3*"]
)
def test_reportchars_all_error(testdir):
testdir.makepyfile(
conftest="""
def pytest_runtest_teardown():
assert 0
""",
test_simple="""
def test_foo():
pass
""",
)
result = testdir.runpytest("-ra")
result.stdout.fnmatch_lines(["ERROR*test_foo*"])
@pytest.mark.xfail("hasattr(sys, 'pypy_version_info')")
def test_errors_in_xfail_skip_expressions(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skipif("asd")
def test_nameerror():
pass
@pytest.mark.xfail("syntax error")
def test_syntax():
pass
def test_func():
pass
"""
)
result = testdir.runpytest()
markline = " ^"
if sys.platform.startswith("java"):
# XXX report this to java
markline = "*" + markline[8:]
result.stdout.fnmatch_lines(
[
"*ERROR*test_nameerror*",
"*evaluating*skipif*expression*",
"*asd*",
"*ERROR*test_syntax*",
"*evaluating*xfail*expression*",
" syntax error",
markline,
"SyntaxError: invalid syntax",
"*1 pass*2 error*",
]
)
def test_xfail_skipif_with_globals(testdir):
testdir.makepyfile(
"""
import pytest
x = 3
@pytest.mark.skipif("x == 3")
def test_skip1():
pass
@pytest.mark.xfail("x == 3")
def test_boolean():
assert 0
"""
)
result = testdir.runpytest("-rsx")
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])
def test_direct_gives_error(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skipif(True)
def test_skip1():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 error*"])
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(
[
"*skipif(*condition)*skip*",
"*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
]
)
def test_xfail_test_setup_exception(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
0 / 0
"""
)
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail
def test_func():
assert 0
"""
)
result = testdir.runpytest(p)
assert result.ret == 0
assert "xfailed" in result.stdout.str()
assert "xpassed" not in result.stdout.str()
def test_imperativeskip_on_xfail_test(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail
def test_that_fails():
assert 0
@pytest.mark.skipif("True")
def test_hello():
pass
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_runtest_setup(item):
pytest.skip("abc")
"""
)
result = testdir.runpytest("-rsxX")
result.stdout.fnmatch_lines_random(
"""
*SKIP*abc*
*SKIP*condition: True*
*2 skipped*
"""
)
class TestBooleanCondition(object):
def test_skipif(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skipif(True, reason="True123")
def test_func1():
pass
@pytest.mark.skipif(False, reason="True123")
def test_func2():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*1 skipped*
"""
)
def test_skipif_noreason(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skipif(True)
def test_func():
pass
"""
)
result = testdir.runpytest("-rs")
result.stdout.fnmatch_lines(
"""
*1 error*
"""
)
def test_xfail(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.xfail(True, reason="True123")
def test_func():
assert 0
"""
)
result = testdir.runpytest("-rxs")
result.stdout.fnmatch_lines(
"""
*XFAIL*
*True123*
*1 xfail*
"""
)
def test_xfail_item(testdir):
# Ensure pytest.xfail works with non-Python Item
testdir.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
nodeid = 'foo'
def runtest(self):
pytest.xfail("Expected Failure")
def pytest_collect_file(path, parent):
return MyItem("foo", parent)
"""
)
result = testdir.inline_run()
passed, skipped, failed = result.listoutcomes()
assert not failed
xfailed = [r for r in skipped if hasattr(r, "wasxfail")]
assert xfailed
def test_module_level_skip_error(testdir):
"""
Verify that using pytest.skip at module level causes a collection error
"""
testdir.makepyfile(
"""
import pytest
@pytest.skip
def test_func():
assert True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines("*Using pytest.skip outside of a test is not allowed*")
def test_module_level_skip_with_allow_module_level(testdir):
"""
Verify that using pytest.skip(allow_module_level=True) is allowed
"""
testdir.makepyfile(
"""
import pytest
pytest.skip("skip_module_level", allow_module_level=True)
def test_func():
assert 0
"""
)
result = testdir.runpytest("-rxs")
result.stdout.fnmatch_lines("*SKIP*skip_module_level")
def test_invalid_skip_keyword_parameter(testdir):
"""
Verify that using pytest.skip() with unknown parameter raises an error
"""
testdir.makepyfile(
"""
import pytest
pytest.skip("skip_module_level", unknown=1)
def test_func():
assert 0
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines("*TypeError:*['unknown']*")
def test_mark_xfail_item(testdir):
# Ensure pytest.mark.xfail works with non-Python Item
testdir.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
nodeid = 'foo'
def setup(self):
marker = pytest.mark.xfail(True, reason="Expected failure")
self.add_marker(marker)
def runtest(self):
assert False
def pytest_collect_file(path, parent):
return MyItem("foo", parent)
"""
)
result = testdir.inline_run()
passed, skipped, failed = result.listoutcomes()
assert not failed
xfailed = [r for r in skipped if hasattr(r, "wasxfail")]
assert xfailed
def test_summary_list_after_errors(testdir):
"""Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting."""
testdir.makepyfile(
"""
import pytest
def test_fail():
assert 0
"""
)
result = testdir.runpytest("-ra")
result.stdout.fnmatch_lines(
[
"=* FAILURES *=",
"*= short test summary info =*",
"FAIL test_summary_list_after_errors.py::test_fail",
]
)
"""SCons.Tool.sgilink
Tool-specific initialization for the SGI MIPSPro linker on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgilink.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Util
import link
linkers = ['CC', 'cc']
def generate(env):
"""Add Builders and construction variables for MIPSPro to an Environment."""
link.generate(env)
env['LINK'] = env.Detect(linkers) or 'cc'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env['RPATHPREFIX'] = '-rpath '
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
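# Illustrative use from an SConstruct (a sketch; SCons normally selects this
# tool automatically on SGI, so requesting it explicitly is rarely needed):
#
#     env = Environment(tools=['sgilink'])
#     env.SharedLibrary('foo', ['foo.c'])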
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Stratum'
db.create_table('cvmfsmon_stratum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('alias', self.gf('django.db.models.fields.CharField')(max_length=20)),
('level', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('cvmfsmon', ['Stratum'])
# Adding unique constraint on 'Stratum', fields ['alias', 'level']
db.create_unique('cvmfsmon_stratum', ['alias', 'level'])
# Adding model 'Repository'
db.create_table('cvmfsmon_repository', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('fqrn', self.gf('django.db.models.fields.CharField')(max_length=100)),
('project_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
('project_description', self.gf('django.db.models.fields.TextField')(blank=True)),
('stratum0', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stratum0', to=orm['cvmfsmon.Stratum'])),
))
db.send_create_signal('cvmfsmon', ['Repository'])
# Adding M2M table for field stratum1s on 'Repository'
m2m_table_name = db.shorten_name('cvmfsmon_repository_stratum1s')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('repository', models.ForeignKey(orm['cvmfsmon.repository'], null=False)),
('stratum', models.ForeignKey(orm['cvmfsmon.stratum'], null=False))
))
db.create_unique(m2m_table_name, ['repository_id', 'stratum_id'])
def backwards(self, orm):
# Removing unique constraint on 'Stratum', fields ['alias', 'level']
db.delete_unique('cvmfsmon_stratum', ['alias', 'level'])
# Deleting model 'Stratum'
db.delete_table('cvmfsmon_stratum')
# Deleting model 'Repository'
db.delete_table('cvmfsmon_repository')
# Removing M2M table for field stratum1s on 'Repository'
db.delete_table(db.shorten_name('cvmfsmon_repository_stratum1s'))
models = {
'cvmfsmon.repository': {
'Meta': {'object_name': 'Repository'},
'fqrn': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'stratum0': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stratum0'", 'to': "orm['cvmfsmon.Stratum']"}),
'stratum1s': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'stratum1s'", 'symmetrical': 'False', 'to': "orm['cvmfsmon.Stratum']"})
},
'cvmfsmon.stratum': {
'Meta': {'unique_together': "(('alias', 'level'),)", 'object_name': 'Stratum'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['cvmfsmon']
import pytest
from flexget.task import TaskAbort
class TestQualityPriority:
config = """
tasks:
test_reorder_quality:
mock:
- {title: 'Some Show S01E01 WEBRip'}
- {title: 'Some Show S01E01 HDTV'}
reorder_quality:
webrip:
above: hdtv
sort_by:
field: quality
reverse: yes
test_normal_quality_priority:
mock:
- {title: 'Some Show S01E02 WEBRip'}
- {title: 'Some Show S01E02 HDTV'}
sort_by:
field: quality
reverse: yes
test_invalid_reorder_quality:
reorder_quality:
h264:
above: hdtv
"""
def test_reorder_quality(self, execute_task):
task = execute_task('test_reorder_quality')
assert (
task.all_entries[0]['title'] == 'Some Show S01E01 WEBRip'
), 'WEBRip should have been accepted'
task = execute_task('test_normal_quality_priority')
assert (
task.all_entries[0]['title'] == 'Some Show S01E02 HDTV'
), 'HDTV should have been accepted'
def test_invalid_reorder_quality(self, execute_task):
with pytest.raises(TaskAbort) as e:
execute_task('test_invalid_reorder_quality')
assert e.value.reason == 'h264=codec and hdtv=source do not have the same quality type'
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""JSON schemas compiler for Zenodo."""
from __future__ import absolute_import, print_function
from . import config
from .cli import jsonschemas
class ZenodoJSONSchemas(object):
"""Zenodo records extension."""
def __init__(self, app=None):
"""Extension initialization."""
if app:
self.init_app(app)
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.extensions['zenodo-jsonschemas'] = self
app.cli.add_command(jsonschemas)
@staticmethod
def init_config(app):
"""Initialize configuration."""
for k in dir(config):
if k.startswith('ZENODO_JSONSCHEMAS_'):
app.config.setdefault(k, getattr(config, k))
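# Illustrative wiring (a sketch; assumes a bare Flask app rather than the
# full Invenio/Zenodo application factory):
#
#     from flask import Flask
#     app = Flask(__name__)
#     ZenodoJSONSchemas(app)
#     assert 'zenodo-jsonschemas' in app.extensions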
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
	def sched_switch(self, prev, prev_state, next):
		event = RunqueueEventUnknown()
		# Nothing changed on this runqueue: prev is still runnable and
		# both tasks are already tracked.
		if taskState(prev_state) == "R" and next in self.tasks \
			and prev in self.tasks:
			return self
		# prev stopped being runnable, so it went to sleep
		if taskState(prev_state) != "R":
			event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
	def __repr__(self):
		# Describe the snapshot by its task tuple plus the event that
		# produced it. (The original appended self.origin_tostring(), a
		# method that is never defined, so taking repr() raised
		# AttributeError.)
		ret = self.tasks.__repr__()
		ret += " %s" % self.event.__repr__()
		return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
	def find_time_slice(self, ts):
		# Binary search for the slice that contains timestamp ts;
		# returns its index, or -1 if ts falls outside every slice.
		start = 0
		end = len(self.data)
		found = -1
		searching = True
		while searching:
			if start == end or start == end - 1:
				searching = False
			i = (end + start) / 2
			if self.data[i].start <= ts and self.data[i].end >= ts:
				found = i
				end = i
				continue
			if self.data[i].end < ts:
				start = i
			elif self.data[i].start > ts:
				end = i
		return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
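# Driving this script (a sketch of the usual perf workflow; the event list
# and the file name are illustrative):
#
#     perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#                 -e sched:sched_wakeup -e sched:sched_wakeup_new sleep 10
#     perf script -s sched-migration.py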
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Matthew Wedgwood
import subprocess
import pwd
import grp
import os
import re
import shlex
import yaml
from charmhelpers.core.hookenv import (
config,
local_unit,
log,
relation_ids,
relation_set,
)
from charmhelpers.core.host import service
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
pass
class Check(object):
shortname_re = '[A-Za-z0-9-_]+$'
service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
"""{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
""")
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
raise CheckException("shortname must match {}".format(
Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
# Nagios server config
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
self.description = description
self.check_cmd = self._locate_cmd(check_cmd)
def _locate_cmd(self, check_cmd):
search_path = (
'/usr/lib/nagios/plugins',
'/usr/local/lib/nagios/plugins',
)
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
command = os.path.join(path, parts[0])
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
log('Check command not found: {}'.format(parts[0]))
return ''
def write(self, nagios_context, hostname):
nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
self.command)
with open(nrpe_check_file, 'w') as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
nrpe_check_config.write("command[{}]={}\n".format(
self.command, self.check_cmd))
if not os.path.exists(NRPE.nagios_exportdir):
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname)
def write_service_config(self, nagios_context, hostname):
for f in os.listdir(NRPE.nagios_exportdir):
if re.search('.*{}.cfg'.format(self.command), f):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_context,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = '{}/service__{}_{}.cfg'.format(
NRPE.nagios_exportdir, hostname, self.command)
with open(nrpe_service_file, 'w') as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
    def run(self):
        # check_cmd may include arguments, so split it before invoking;
        # passing the raw string would be treated as a single program path.
        subprocess.call(shlex.split(self.check_cmd))
class NRPE(object):
nagios_logdir = '/var/log/nagios'
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'
def __init__(self, hostname=None):
super(NRPE, self).__init__()
self.config = config()
self.nagios_context = self.config['nagios_context']
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
def add_check(self, *args, **kwargs):
self.checks.append(Check(*args, **kwargs))
def write(self):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # pwd/grp raise KeyError when the nagios user or group is absent
log("Nagios user not set up, nrpe checks not updated")
return
if not os.path.exists(NRPE.nagios_logdir):
os.mkdir(NRPE.nagios_logdir)
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
service('restart', 'nagios-nrpe-server')
for rid in relation_ids("local-monitors"):
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
#! /usr/bin/env python
"""Hook to generate a CSV file summarising the schedule so that we can easily
see which sessions have changed and who needs to be notified.
There's a lot of copying and pasting from Kev's guidebook.py.
"""
import codecs
import csv
import cStringIO
import io
import os
from flat_schedule import mkdirs, read_html_tabular_schedule
EVENT_TYPES = {'demo', 'workshop', 'talk', 'panel'}
class UnicodeWriter(object):
# https://docs.python.org/2.7/library/csv.html#writer-objects
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode('utf-8') for s in row])
data = self.queue.getvalue()
data = data.decode('utf-8')
data = self.encoder.encode(data)
self.stream.write(data)
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
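# Minimal usage sketch for UnicodeWriter (illustrative; the path and row are
# hypothetical, not part of the schedule pipeline):
#
#     with io.open('/tmp/example.csv', 'wb') as f:
#         w = UnicodeWriter(f)
#         w.writerow([u'Title', u'Caf\xe9 talk'])  # unicode-safe on Python 2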
def write_summary_schedule(schedule, config):
schedule_dir = os.path.join(config['output_dir'], 'schedule', 'summary')
schedule_path = os.path.join(schedule_dir, 'summary.csv')
mkdirs(schedule_dir)
headings = ['Title', 'Speaker', 'Date', 'Time', 'Room']
with io.open(schedule_path, 'wb') as csvfile:
writer = UnicodeWriter(csvfile)
writer.writerow(headings)
for talk in schedule:
writer.writerow(make_row(talk, config))
def make_row(talk, config):
title = talk['title']
speaker = extract_speaker(talk, config)
date = talk['start'].strftime('%Y-%m-%d')
time = talk['start'].strftime('%H:%M')
room = talk['location']
return [title, speaker, date, time, room]
def extract_speaker(talk, config):
if talk['type'] in EVENT_TYPES:
path = os.path.join(config['content_dir'], talk['href'].strip('/') + '.md')
with open(path) as f:
for line in f:
line = line.decode('utf-8')
if line[:4] == '### ':
return line[4:].strip()
return ''
def create_summary_schedule(config):
schedule = read_html_tabular_schedule(config)
write_summary_schedule(schedule, config)
if __name__ == '__main__':
config = {
'template_dir': 'templates',
'output_dir': 'output',
'content_dir': 'content'
}
create_summary_schedule(config)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## This utility takes the debian directory from an unpacked debian mono source
## tree (e.g. apt-get source mono), parses the *.install files and generates a
## bitbake include file with the file and package lists. It tries to handle -dbg
## packages by creating additional glob patterns for *.mdb and */.debug/*. Most
## of these will not match, but that's fine (I think).
## -- Henryk Plötz
##
##The output looks like:
##FILES_mono-jit-dbg = "/usr/bin/mono*.mdb \
## /usr/bin/mono*/*.mdb"
##FILES_mono-jit = "/usr/bin/mono"
##FILES_mono-gac-dbg = "/usr/bin/gacutil*.mdb \
## /usr/bin/gacutil*/*.mdb \
## /usr/lib/mono/1.0/gacutil.exe*.mdb \
## /usr/lib/mono/1.0/gacutil.exe*/*.mdb"
##FILES_mono-gac = "/usr/bin/gacutil \
## /usr/lib/mono/1.0/gacutil.exe"
## ...
##PACKAGES = "mono-jit-dbg \
## mono-jit \
## mono-gac-dbg \
## mono-gac \
## ...
import os, sys, re
def collect_paths(dir):
paths = {}
os.chdir(dir)
for filename in os.listdir("."):
if filename.endswith(".install"):
fp = file(filename, "r")
lines = fp.readlines()
fp.close()
contents = []
for line in lines:
line = line.strip()
if line.startswith("#"): continue
if line == "": continue
lineparts = line.split()
if lineparts[0].startswith("debian/tmp"):
pattern = lineparts[0][ len("debian/tmp"): ]
if len(lineparts) == 2:
if not pattern.startswith(lineparts[1]):
print >>sys.stderr, "Warning: Apparently I don't fully understand the format in file %s" % filename
elif len(lineparts) > 2:
print >>sys.stderr, "Warning: Apparently I don't fully understand the format in file %s" % filename
contents.append( pattern )
else:
print >>sys.stderr, "Note: Ignoring %s in %s" % (lineparts, filename)
paths[ filename[ :-len(".install") ] ] = contents
return paths
def collect_packages(paths):
# These packages should be populated first (e.g. because their files will otherwise end up
# in other packages)
PACKAGES_FIRST = ("mono-jit", "mono-gac", "mono-mjs", "mono-gmcs", "mono-utils", "mono-doc")
# These should be populated last (because their spec is very broad)
PACKAGES_LAST = ("mono-mcs", "libmono-system1.0-cil", "libmono-system2.0-cil", "libmono1.0-cil", "libmono2.0-cil")
first = []
last = []
packages = paths.keys()
for packagename in PACKAGES_FIRST + PACKAGES_LAST:
if packagename in packages:
packages.remove(packagename)
if packagename in PACKAGES_FIRST:
first.append(packagename)
else:
last.append(packagename)
packagenames = first + packages + last
return packagenames, paths
def debugify(packagenames, paths):
pnames = []
for pkg in packagenames:
if not pkg.endswith("-dbg"):
result = []
for path in paths[pkg]:
if not path.endswith("*"):
result.append(path + "*.mdb")
result.append(path + "*/*.mdb")
else:
result.append(path + ".mdb")
result.append(path + "/*.mdb")
if path.endswith("/"):
result.append(path + ".debug/")
result.append(path + "../.debug/")
paths[pkg + "-dbg"] = result
pnames.append(pkg + "-dbg")
pnames.append(pkg)
return pnames, paths
if __name__ == "__main__":
packagenames, paths = collect_packages( collect_paths(".") )
packagenames, paths = debugify(packagenames, paths)
print "# This is a generated file, please do not edit directly"
print "# Use collect-files.py instead. -- Henryk "
packages = []
for pkg in packagenames:
if not paths[pkg]: continue
print 'FILES_%s = "%s"' % (pkg, " \\\n\t".join(paths[pkg]))
packages.append(pkg)
print
print 'PACKAGES = "%s"' % (" \\\n\t".join(packages))
#!/usr/bin/env python
#coding: utf-8
from collections import defaultdict
import sys
import datetime
# result[date_key][from_country][req_country] -> set of unique user/country keys
result = defaultdict(lambda : defaultdict(lambda : defaultdict(set)))
def print_result():
for date_key in result.iterkeys():
year, month, req_type = date_key.split('_')
for from_country in result[date_key].iterkeys():
for req_country in result[date_key][from_country].iterkeys():
print '{};{:02d};{};{};{};{}'.format(year,int(month),from_country,req_country,req_type,len(result[date_key][from_country][req_country]))
try:
    with sys.stdin as stream:  # 'stream' avoids shadowing the builtin 'file'
        for rec in stream:
try:
parts = rec.strip().split('|')
req_type = 'R' if len(parts) == 6 and parts[5]=='.routing' else 'M'
from_country = parts[0]
date = datetime.datetime.strptime(parts[2], '%d/%b/%Y:%H:%M:%S')
user_id = parts[3]
req_country = parts[4].split('_')[0]
date_key = '{}_{}_{}'.format(date.year,date.month,req_type)
user_key = '{}_{}'.format(user_id,req_country)
result[date_key][from_country][req_country].add(user_key)
            except Exception:
                pass  # skip malformed records, but let KeyboardInterrupt through
except KeyboardInterrupt:
print_result()
exit(0)
except:
print_result()
raise
print_result()
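# Input record sketch, inferred from the parsing above (field names are
# illustrative; fields are '|'-separated):
#
#     from_country|?|%d/%b/%Y:%H:%M:%S|user_id|req_country_suffix[|.routing]
#
# e.g. DE|-|21/Mar/2015:10:11:12|u42|FR_tiles|.routing  -> counted as type 'R'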
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('browser', '0003_auto_20150320_0253'),
]
operations = [
migrations.CreateModel(
name='ROnline',
fields=[
('resource_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='browser.Resource')),
('otype', models.CharField(max_length=1, choices=[(b'0', b'video'), (b'1', b'article'), (b'2', b'web site')])),
('date', models.DateTimeField()),
('url', models.TextField(blank=True)),
],
options={
},
bases=('browser.resource',),
),
migrations.CreateModel(
name='SServices',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('diagnostic', models.BooleanField(default=False)),
('resource', models.BooleanField(default=False)),
('therapy', models.BooleanField(default=False)),
('educational', models.BooleanField(default=False)),
('referral', models.BooleanField(default=False)),
('legal', models.BooleanField(default=False)),
('city', models.CharField(max_length=30)),
('resourceLink', models.ForeignKey(to='browser.Resource')),
],
options={
},
bases=(models.Model,),
),
migrations.DeleteModel(
name='SService',
),
]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from BasePanel import BasePanel
import ConceptReference
from Borg import Borg
class ConceptReferencePanel(BasePanel):
def __init__(self,parent):
BasePanel.__init__(self,parent,armid.CONCEPTREFERENCE_ID)
self.theId = None
b = Borg()
self.dbProxy = b.dbProxy
def buildControls(self,isCreate,isUpdateable=True):
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.buildTextSizer('Name',(87,30),armid.CONCEPTREFERENCE_TEXTNAME_ID),0,wx.EXPAND)
dims = ['asset','attacker','countermeasure','domainproperty','environment','goal','misusecase','obstacle','persona','requirement','response','risk','role','task','threat','vulnerability']
mainSizer.Add(self.buildComboSizerList('Concept',(87,30),armid.CONCEPTREFERENCE_COMBODIMNAME_ID,dims),0,wx.EXPAND)
mainSizer.Add(self.buildComboSizerList('Object',(87,30),armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID,[]),0,wx.EXPAND)
mainSizer.Add(self.buildMLTextSizer('Description',(87,30),armid.CONCEPTREFERENCE_TEXTDESCRIPTION_ID),1,wx.EXPAND)
mainSizer.Add(self.buildCommitButtonSizer(armid.CONCEPTREFERENCE_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER)
wx.EVT_COMBOBOX(self,armid.CONCEPTREFERENCE_COMBODIMNAME_ID,self.onDimensionChange)
self.SetSizer(mainSizer)
def loadControls(self,objt,isReadOnly=False):
self.theId = objt.id()
nameCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_TEXTNAME_ID)
dimCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBODIMNAME_ID)
objtCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID)
descCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_TEXTDESCRIPTION_ID)
nameCtrl.SetValue(objt.name())
dimCtrl.SetValue(objt.dimension())
objtCtrl.SetValue(objt.objectName())
descCtrl.SetValue(objt.description())
def onDimensionChange(self,evt):
dimName = evt.GetString()
objts = self.dbProxy.getDimensionNames(dimName)
objtCtrl = self.FindWindowById(armid.CONCEPTREFERENCE_COMBOOBJTNAME_ID)
objtCtrl.SetItems(objts)
"""
Dummy database backend for Django.
Django uses this if the database ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import *
from django.db.backends.creation import BaseDatabaseCreation
def complain(*args, **kwargs):
raise ImproperlyConfigured("settings.DATABASES is improperly configured. "
"Please supply the ENGINE value. Check "
"settings documentation for more details.")
def ignore(*args, **kwargs):
pass
class DatabaseError(Exception):
pass
class IntegrityError(DatabaseError):
pass
class DatabaseOperations(BaseDatabaseOperations):
quote_name = complain
class DatabaseClient(BaseDatabaseClient):
runshell = complain
class DatabaseCreation(BaseDatabaseCreation):
create_test_db = ignore
destroy_test_db = ignore
class DatabaseIntrospection(BaseDatabaseIntrospection):
get_table_list = complain
get_table_description = complain
get_relations = complain
get_indexes = complain
get_key_columns = complain
class DatabaseWrapper(BaseDatabaseWrapper):
operators = {}
    # Override the base class implementations with null
    # implementations. Anything that tries to actually do
    # something goes through complain() and raises
    # ImproperlyConfigured; anything that tries to rollback
    # or undo something goes through ignore() and silently
    # does nothing.
_commit = complain
_rollback = ignore
enter_transaction_management = complain
leave_transaction_management = ignore
set_dirty = complain
set_clean = complain
commit_unless_managed = complain
rollback_unless_managed = ignore
savepoint = ignore
savepoint_commit = complain
savepoint_rollback = ignore
close = ignore
cursor = complain
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = BaseDatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
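# Behavior sketch (illustrative only): anything that would touch a real
# database complains, while close() stays a silent no-op.
#
#     wrapper = DatabaseWrapper({})   # dummy settings dict
#     wrapper.close()                 # ignored
#     wrapper.cursor()                # raises ImproperlyConfigured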
import logging
import os
from openpyxl import Workbook
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s \t', level=logging.INFO)
EXCEL_DIR = '/home/lucasx/PycharmProjects/DataHouse/DataSet/'
def write_excel(list_, filename):
mkdirs_if_not_exists(EXCEL_DIR)
wb = Workbook()
ws = wb.active
ws.title = "HouseInfo"
    # Header row, then one row per house record (same column order as before).
    fields = ['address', 'area', 'block', 'buildYear', 'image',
              'midPrice', 'name', 'saleNum', 'url']
    for col, field in enumerate(fields, start=1):
        ws.cell(row=1, column=col).value = field
    rownum = 2
    for each_item in list_:
        for col, field in enumerate(fields, start=1):
            ws.cell(row=rownum, column=col).value = getattr(each_item, field)
        rownum += 1
wb.save(EXCEL_DIR + filename + '.xlsx')
    logging.info('Excel file generated successfully!')
def mkdirs_if_not_exists(directory_):
"""create a new folder if it does not exist"""
if not os.path.exists(directory_) or not os.path.isdir(directory_):
os.makedirs(directory_)
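# Example call (a sketch; HouseInfo is a hypothetical record type exposing
# the attributes write_excel() reads):
#
#     class HouseInfo(object):
#         def __init__(self, **kw):
#             self.__dict__.update(kw)
#
#     write_excel([HouseInfo(address='...', area=89.5, block='A',
#                            buildYear=2010, image='', midPrice=12000,
#                            name='Sample', saleNum=3, url='')], 'houses')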
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class Symbols(object):
def __init__(self, grammar):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.iteritems():
setattr(self, name, symbol)
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
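# Illustrative usage (a sketch; Driver and parse_string are existing pgen2
# APIs, and 'file_input' is the top-level nonterminal of Grammar.txt):
#
#     d = driver.Driver(python_grammar, convert=pytree.convert)
#     tree = d.parse_string("x = 1\n")
#     assert tree.type == python_symbols.file_input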
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL ().
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from openerp import SUPERUSER_ID
from dateutil.relativedelta import relativedelta
from datetime import datetime
from psycopg2 import OperationalError
import openerp
class procurement_group(osv.osv):
_inherit = 'procurement.group'
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner')
}
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
result = super(procurement_rule, self)._get_action(cr, uid, context=context)
return result + [('move', _('Move From Another Location'))]
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids):
res += [x.id for x in route.pull_ids]
return res
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'),
'location_src_id': fields.many2one('stock.location', 'Source Location',
help="Source location is action=move"),
'route_id': fields.many2one('stock.location.route', 'Route',
help="If route_id is False, the rule is global"),
'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True,
help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type',
help="Picking Type determines the way the picking should be shown in the view, reports, ..."),
'delay': fields.integer('Number of Days'),
'partner_address_id': fields.many2one('res.partner', 'Partner Address'),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this rule will be too'),
'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'),
'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"),
}
_defaults = {
'procure_method': 'make_to_stock',
'propagate': True,
'delay': 0,
}
class procurement_order(osv.osv):
_inherit = "procurement.order"
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'), # not required because task may create procurements that aren't linked to a location with sale_service
'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"),
'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"),
'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'),
}
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'move' and procurement.move_ids:
self.pool.get('stock.move').action_cancel(cr, uid, [m.id for m in procurement.move_ids], context=context)
def cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
ctx = context.copy()
        # set the context for the propagation of the procurement cancellation
ctx['cancel_procurement'] = True
for procurement in self.browse(cr, uid, to_cancel_ids, context=ctx):
self.propagate_cancel(cr, uid, procurement, context=ctx)
return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx)
def _find_parent_locations(self, cr, uid, procurement, context=None):
location = procurement.location_id
res = [location.id]
while location.location_id:
location = location.location_id
res.append(location.id)
return res
def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
return {'value': {'location_id': warehouse.lot_stock_id.id}}
return {}
def _search_suitable_rule(self, cr, uid, procurement, domain, context=None):
        '''Try first to find a rule among those defined on the procurement order's group; if none is
        found, try the routes defined for the product, and finally fall back on the default behavior.'''
pull_obj = self.pool.get('procurement.rule')
warehouse_route_ids = []
if procurement.warehouse_id:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids]
procurement_route_ids = [x.id for x in procurement.route_ids]
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or []
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
return res
def _find_suitable_rule(self, cr, uid, procurement, context=None):
rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context)
if not rule_id:
#a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A'
all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context)
rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context)
rule_id = rule_id and rule_id[0] or False
return rule_id
def _run_move_create(self, cr, uid, procurement, context=None):
''' Returns a dictionary of values that will be used to create a stock move from a procurement.
This function assumes that the given procurement has a rule (action == 'move') set on it.
:param procurement: browse record
:rtype: dictionary
'''
newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')
group_id = False
if procurement.rule_id.group_propagation_option == 'propagate':
group_id = procurement.group_id and procurement.group_id.id or False
elif procurement.rule_id.group_propagation_option == 'fixed':
group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False
#it is possible that we've already got some move done, so check for the done qty and create
#a new move with the correct qty
already_done_qty = 0
already_done_qty_uos = 0
for move in procurement.move_ids:
already_done_qty += move.product_uom_qty if move.state == 'done' else 0
already_done_qty_uos += move.product_uos_qty if move.state == 'done' else 0
qty_left = max(procurement.product_qty - already_done_qty, 0)
qty_uos_left = max(procurement.product_uos_qty - already_done_qty_uos, 0)
vals = {
'name': procurement.name,
'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,
'product_id': procurement.product_id.id,
'product_uom': procurement.product_uom.id,
'product_uom_qty': qty_left,
'product_uos_qty': (procurement.product_uos and qty_uos_left) or qty_left,
'product_uos': (procurement.product_uos and procurement.product_uos.id) or procurement.product_uom.id,
'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,
'location_id': procurement.rule_id.location_src_id.id,
'location_dest_id': procurement.location_id.id,
'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,
'procurement_id': procurement.id,
'rule_id': procurement.rule_id.id,
'procure_method': procurement.rule_id.procure_method,
'origin': procurement.origin,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in procurement.route_ids],
'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,
'date': newdate,
'date_expected': newdate,
'propagate': procurement.rule_id.propagate,
'priority': procurement.priority,
}
return vals
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'move':
if not procurement.rule_id.location_src_id:
self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)
return False
move_obj = self.pool.get('stock.move')
move_dict = self._run_move_create(cr, uid, procurement, context=context)
#create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)
return True
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def run(self, cr, uid, ids, autocommit=False, context=None):
new_ids = [x.id for x in self.browse(cr, uid, ids, context=context) if x.state not in ('running', 'done', 'cancel')]
res = super(procurement_order, self).run(cr, uid, new_ids, autocommit=autocommit, context=context)
        # after all the procurements are run, check if some created a draft stock move that needs to be confirmed
        # (we do this in batch because it speeds up picking assignment and picking state computation)
move_to_confirm_ids = []
for procurement in self.browse(cr, uid, new_ids, context=context):
if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move":
move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft']
if move_to_confirm_ids:
self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context)
return res
def _check(self, cr, uid, procurement, context=None):
''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related
moves are done/cancel and if the requested quantity is moved.
'''
if procurement.rule_id and procurement.rule_id.action == 'move':
uom_obj = self.pool.get('product.uom')
# In case Phantom BoM splits only into procurements
if not procurement.move_ids:
return True
cancel_test_list = [x.state == 'cancel' for x in procurement.move_ids]
done_cancel_test_list = [x.state in ('done', 'cancel') for x in procurement.move_ids]
at_least_one_cancel = any(cancel_test_list)
all_done_or_cancel = all(done_cancel_test_list)
all_cancel = all(cancel_test_list)
if not all_done_or_cancel:
return False
elif all_done_or_cancel and not all_cancel:
return True
elif all_cancel:
self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'cancel'}, context=context)
return False
return super(procurement_order, self)._check(cr, uid, procurement, context)
def do_view_pickings(self, cr, uid, ids, context=None):
'''
This function returns an action that display the pickings of the procurements belonging
to the same procurement group of given ids.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
Call the scheduler to check the running procurements (super method), to check the minimum stock rules
and the availability of moves. This function is intended to be run for all companies at the same time, so
we run these functions as SUPERUSER to avoid intercompany and access-rights issues.
@param self: The object pointer
@param cr: The current database cursor
@param uid: The current user ID for security checks
@param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
if context is None:
context = {}
try:
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
move_obj = self.pool.get('stock.move')
#Minimum stock rules
self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
#Search all confirmed stock_moves and try to assign them
confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='priority desc, date_expected asc', context=context)
for x in xrange(0, len(confirmed_ids), 100):
move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context)
if use_new_cursor:
cr.commit()
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
date_planned = start_date + relativedelta(days=orderpoint.product_id.seller_delay or 0.0)
return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
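# Illustrative (hypothetical values): with seller_delay == 3 and a start date
# of 2016-01-01, date_planned renders as '2016-01-04'
# (DEFAULT_SERVER_DATE_FORMAT is '%Y-%m-%d').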
def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
return {
'name': orderpoint.name,
'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
'product_id': orderpoint.product_id.id,
'product_qty': product_qty,
'company_id': orderpoint.company_id.id,
'product_uom': orderpoint.product_uom.id,
'location_id': orderpoint.location_id.id,
'origin': orderpoint.name,
'warehouse_id': orderpoint.warehouse_id.id,
'orderpoint_id': orderpoint.id,
'group_id': orderpoint.group_id.id,
}
def _product_virtual_get(self, cr, uid, order_point):
product_obj = self.pool.get('product.product')
return product_obj._product_available(cr, uid,
[order_point.product_id.id],
context={'location': order_point.location_id.id})[order_point.product_id.id]['virtual_available']
def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
Create procurement based on Orderpoint
:param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
'''
if context is None:
context = {}
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
procurement_obj = self.pool.get('procurement.order')
dom = company_id and [('company_id', '=', company_id)] or []
orderpoint_ids = orderpoint_obj.search(cr, uid, dom)
prev_ids = []
while orderpoint_ids:
ids = orderpoint_ids[:100]
del orderpoint_ids[:100]
for op in orderpoint_obj.browse(cr, uid, ids, context=context):
try:
prods = self._product_virtual_get(cr, uid, op)
if prods is None:
continue
if float_compare(prods, op.product_min_qty, precision_rounding=op.product_uom.rounding) < 0:
qty = max(op.product_min_qty, op.product_max_qty) - prods
reste = op.qty_multiple > 0 and qty % op.qty_multiple or 0.0
if float_compare(reste, 0.0, precision_rounding=op.product_uom.rounding) > 0:
qty += op.qty_multiple - reste
if float_compare(qty, 0.0, precision_rounding=op.product_uom.rounding) <= 0:
continue
qty -= orderpoint_obj.subtract_procurements(cr, uid, op, context=context)
qty_rounded = float_round(qty, precision_rounding=op.product_uom.rounding)
if qty_rounded > 0:
proc_id = procurement_obj.create(cr, uid,
self._prepare_orderpoint_procurement(cr, uid, op, qty_rounded, context=context),
context=context)
self.check(cr, uid, [proc_id])
self.run(cr, uid, [proc_id])
if use_new_cursor:
cr.commit()
except OperationalError:
if use_new_cursor:
orderpoint_ids.append(op.id)
cr.rollback()
continue
else:
raise
if use_new_cursor:
cr.commit()
if prev_ids == ids:
break
else:
prev_ids = ids
if use_new_cursor:
cr.commit()
cr.close()
return {}
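# Illustrative sketch (not part of the module): the reordering rule applied in
# _procure_orderpoint_confirm, reduced to pure arithmetic. When the forecast
# quantity falls below the minimum, order back up to the maximum, rounded up
# to the next quantity multiple (running procurements and UoM rounding are
# handled above and omitted here).
def _demo_orderpoint_qty(virtual_qty, min_qty, max_qty, qty_multiple):
    if virtual_qty >= min_qty:
        return 0.0
    qty = max(min_qty, max_qty) - virtual_qty
    remainder = qty % qty_multiple if qty_multiple > 0 else 0.0
    if remainder > 0:
        qty += qty_multiple - remainder
    return qty

# _demo_orderpoint_qty(3.0, 5.0, 20.0, 6.0) == 18.0  (17 rounded up to 18)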
# (c) 2012, Michael DeHaan
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: indexed_items
author: Michael DeHaan
version_added: "1.3"
short_description: rewrites lists to return 'indexed items'
description:
- use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go
- any list given will be transformed, with each resulting element carrying its position in the original list in item.0 and its value in item.1
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: indexed loop demo
debug:
msg: "at array position {{ item.0 }} there is a value {{ item.1 }}"
with_indexed_items:
- "{{ some_list }}"
"""
RETURN = """
_raw:
description:
- list with each item.0 giving you the position and item.1 the value
type: list
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return list(zip(range(len(items)), items))
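# Illustrative sketch (not part of the plugin): the core transformation that
# run() performs once the terms are flattened.
def _demo_indexed(items):
    # mirrors list(zip(range(len(items)), items)) above
    return list(zip(range(len(items)), items))

# _demo_indexed(['a', 'b', 'c']) == [(0, 'a'), (1, 'b'), (2, 'c')]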
import json
from psycopg2.extras import Json
from django.contrib.postgres import forms, lookups
from django.core import exceptions
from django.db.models import Field, Transform
from django.utils.translation import ugettext_lazy as _
__all__ = ['JSONField']
class JSONField(Field):
empty_strings_allowed = False
description = _('A JSON object')
default_error_messages = {
'invalid': _("Value must be valid JSON."),
}
def db_type(self, connection):
return 'jsonb'
def get_transform(self, name):
transform = super(JSONField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def get_prep_value(self, value):
if value is not None:
return Json(value)
return value
def get_prep_lookup(self, lookup_type, value):
if lookup_type in ('has_key', 'has_keys', 'has_any_keys'):
return value
if isinstance(value, (dict, list)):
return Json(value)
return super(JSONField, self).get_prep_lookup(lookup_type, value)
def validate(self, value, model_instance):
super(JSONField, self).validate(value, model_instance)
try:
json.dumps(value)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def value_to_string(self, obj):
value = self.value_from_object(obj)
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.JSONField}
defaults.update(kwargs)
return super(JSONField, self).formfield(**defaults)
JSONField.register_lookup(lookups.DataContains)
JSONField.register_lookup(lookups.ContainedBy)
JSONField.register_lookup(lookups.HasKey)
JSONField.register_lookup(lookups.HasKeys)
JSONField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if len(key_transforms) > 1:
return "{} #> %s".format(lhs), [key_transforms] + params
try:
int(self.key_name)
except ValueError:
lookup = "'%s'" % self.key_name
else:
lookup = "%s" % self.key_name
return "%s -> %s" % (lhs, lookup), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
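# Illustrative (hypothetical model and data): chained key transforms collapse
# into a single '#>' path lookup, while a lone key compiles to '->':
#
#   Dog.objects.filter(data__owner__name='Bob')
#     -> "data" #> %s   with params [['owner', 'name']]
#   Dog.objects.filter(data__owner='Bob')
#     -> "data" -> 'owner'   (text key)
#   Dog.objects.filter(data__0='x')
#     -> "data" -> 0   (integer key, i.e. an array index)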
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of the test cases from 'tests', in their original order.
Duplicates are removed (only the first occurrence is kept).
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if test_case not in test_cases:
test_cases.append(test_case)
return test_cases
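# Illustrative: GetTestCases(['A.x', 'A.y', 'B.z']) == ['A', 'B'].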
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if not test_cases or test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from mediadrop.lib.test.pythonic_testcase import *
from mediadrop.plugin.events import Event, observes
class ObserveDecoratorTest(PythonicTestCase):
def test_catches_unknown_keyword_parameters_in_constructor(self):
e = assert_raises(TypeError, lambda: observes(Event(), invalid=True))
assert_equals("TypeError: observes() got an unexpected keyword argument 'invalid'",
e.args[0])
def probe(self, result):
pass
def test_can_observe_event(self):
event = Event([])
observes(event)(self.probe)
assert_length(1, event.observers)
assert_equals(self.probe, event.observers[0])
def test_observers_can_request_priority(self):
def second_probe(result):
pass
event = Event([])
observes(event)(self.probe)
observes(event, appendleft=True)(second_probe)
assert_length(2, event.observers)
assert_equals([second_probe, self.probe], list(event.observers))
import unittest
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ObserveDecoratorTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Bruno Cauet
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Create freedesktop.org-compliant thumbnails for album folders
This plugin is POSIX-only.
Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html
"""
from __future__ import division, absolute_import, print_function
from hashlib import md5
import os
import shutil
from itertools import chain
from pathlib import PurePosixPath
import ctypes
import ctypes.util
from xdg import BaseDirectory
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs
from beets import util
from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version
import six
BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
NORMAL_DIR = util.bytestring_path(os.path.join(BASE_DIR, "normal"))
LARGE_DIR = util.bytestring_path(os.path.join(BASE_DIR, "large"))
class ThumbnailsPlugin(BeetsPlugin):
def __init__(self):
super(ThumbnailsPlugin, self).__init__()
self.config.add({
'auto': True,
'force': False,
'dolphin': False,
})
self.write_metadata = None
if self.config['auto'] and self._check_local_ok():
self.register_listener('art_set', self.process_album)
def commands(self):
thumbnails_command = Subcommand("thumbnails",
help=u"Create album thumbnails")
thumbnails_command.parser.add_option(
u'-f', u'--force',
dest='force', action='store_true', default=False,
help=u'force regeneration of thumbnails deemed fine (existing & '
u'recent enough)')
thumbnails_command.parser.add_option(
u'--dolphin', dest='dolphin', action='store_true', default=False,
help=u"create Dolphin-compatible thumbnail information (for KDE)")
thumbnails_command.func = self.process_query
return [thumbnails_command]
def process_query(self, lib, opts, args):
self.config.set_args(opts)
if self._check_local_ok():
for album in lib.albums(decargs(args)):
self.process_album(album)
def _check_local_ok(self):
"""Check that's everythings ready:
- local capability to resize images
- thumbnail dirs exist (create them if needed)
- detect whether we'll use PIL or IM
- detect whether we'll use GIO or Python to get URIs
"""
if not ArtResizer.shared.local:
self._log.warning(u"No local image resizing capabilities, "
u"cannot generate thumbnails")
return False
for dir in (NORMAL_DIR, LARGE_DIR):
if not os.path.exists(dir):
os.makedirs(dir)
if get_im_version():
self.write_metadata = write_metadata_im
tool = "IM"
else:
assert get_pil_version() # since we're local
self.write_metadata = write_metadata_pil
tool = "PIL"
self._log.debug(u"using {0} to write metadata", tool)
uri_getter = GioURI()
if not uri_getter.available:
uri_getter = PathlibURI()
self._log.debug(u"using {0.name} to compute URIs", uri_getter)
self.get_uri = uri_getter.uri
return True
def process_album(self, album):
"""Produce thumbnails for the album folder.
"""
self._log.debug(u'generating thumbnail for {0}', album)
if not album.artpath:
self._log.info(u'album {0} has no art', album)
return
if self.config['dolphin']:
self.make_dolphin_cover_thumbnail(album)
size = ArtResizer.shared.get_size(album.artpath)
if not size:
self._log.warning(u'problem getting the picture size for {0}',
album.artpath)
return
wrote = True
if max(size) >= 256:
wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR)
wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR)
if wrote:
self._log.info(u'wrote thumbnail for {0}', album)
else:
self._log.info(u'nothing to do for {0}', album)
def make_cover_thumbnail(self, album, size, target_dir):
"""Make a thumbnail of given size for `album` and put it in
`target_dir`.
"""
target = os.path.join(target_dir, self.thumbnail_file_name(album.path))
if os.path.exists(target) and \
os.stat(target).st_mtime > os.stat(album.artpath).st_mtime:
if self.config['force']:
self._log.debug(u"found a suitable {1}x{1} thumbnail for {0}, "
u"forcing regeneration", album, size)
else:
self._log.debug(u"{1}x{1} thumbnail for {0} exists and is "
u"recent enough", album, size)
return False
resized = ArtResizer.shared.resize(size, album.artpath,
util.syspath(target))
self.add_tags(album, util.syspath(resized))
shutil.move(resized, target)
return True
def thumbnail_file_name(self, path):
"""Compute the thumbnail file name
See https://standards.freedesktop.org/thumbnail-spec/latest/x227.html
"""
uri = self.get_uri(path)
hash = md5(uri.encode('utf-8')).hexdigest()
return util.bytestring_path("{0}.png".format(hash))
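# Illustrative (hypothetical path): per the spec the thumbnail name is the
# MD5 hex digest of the file URI, so u'file:///music/Album' maps to
# md5(b'file:///music/Album').hexdigest() + '.png' in the cache directory.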
def add_tags(self, album, image_path):
"""Write required metadata to the thumbnail
See https://standards.freedesktop.org/thumbnail-spec/latest/x142.html
"""
mtime = os.stat(album.artpath).st_mtime
metadata = {"Thumb::URI": self.get_uri(album.artpath),
"Thumb::MTime": six.text_type(mtime)}
try:
self.write_metadata(image_path, metadata)
except Exception:
self._log.exception(u"could not write metadata to {0}",
util.displayable_path(image_path))
def make_dolphin_cover_thumbnail(self, album):
outfilename = os.path.join(album.path, b".directory")
if os.path.exists(outfilename):
return
artfile = os.path.split(album.artpath)[1]
with open(outfilename, 'w') as f:
f.write('[Desktop Entry]\n')
f.write('Icon=./{0}'.format(artfile.decode('utf-8')))
self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename))
def write_metadata_im(file, metadata):
"""Enrich the file metadata with `metadata` dict thanks to IM."""
command = ['convert', file] + \
list(chain.from_iterable(('-set', k, v)
for k, v in metadata.items())) + [file]
util.command_output(command)
return True
def write_metadata_pil(file, metadata):
"""Enrich the file metadata with `metadata` dict thanks to PIL."""
from PIL import Image, PngImagePlugin
im = Image.open(file)
meta = PngImagePlugin.PngInfo()
for k, v in metadata.items():
meta.add_text(k, v, 0)
im.save(file, "PNG", pnginfo=meta)
return True
class URIGetter(object):
available = False
name = "Abstract base"
def uri(self, path):
raise NotImplementedError()
class PathlibURI(URIGetter):
available = True
name = "Python Pathlib"
def uri(self, path):
return PurePosixPath(path).as_uri()
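# Illustrative (hypothetical path):
#   PurePosixPath('/music/Album').as_uri() == 'file:///music/Album'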
def copy_c_string(c_string):
"""Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python
string and return it. The old memory is then safe to free.
"""
# This is a pretty dumb way to get a string copy, but it seems to
# work. A more surefire way would be to allocate a ctypes buffer and copy
# the data with `memcpy` or somesuch.
s = ctypes.cast(c_string, ctypes.c_char_p).value
return b'' + s
class GioURI(URIGetter):
"""Use gio URI function g_file_get_uri. Paths must be utf-8 encoded.
"""
name = "GIO"
def __init__(self):
self.libgio = self.get_library()
self.available = bool(self.libgio)
if self.available:
self.libgio.g_type_init() # for glib < 2.36
self.libgio.g_file_new_for_path.argtypes = [ctypes.c_char_p]
self.libgio.g_file_new_for_path.restype = ctypes.c_void_p
self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p]
self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char)
self.libgio.g_object_unref.argtypes = [ctypes.c_void_p]
def get_library(self):
lib_name = ctypes.util.find_library("gio-2")
try:
if not lib_name:
return False
return ctypes.cdll.LoadLibrary(lib_name)
except OSError:
return False
def uri(self, path):
g_file_ptr = self.libgio.g_file_new_for_path(path)
if not g_file_ptr:
raise RuntimeError(u"No gfile pointer received for {0}".format(
util.displayable_path(path)))
try:
uri_ptr = self.libgio.g_file_get_uri(g_file_ptr)
finally:
self.libgio.g_object_unref(g_file_ptr)
if not uri_ptr:
raise RuntimeError(u"No URI received from the gfile pointer for "
u"{0}".format(util.displayable_path(path)))
try:
uri = copy_c_string(uri_ptr)
finally:
self.libgio.g_free(uri_ptr)
try:
return uri.decode(util._fsencoding())
except UnicodeDecodeError:
raise RuntimeError(
"Could not decode filename from GIO: {!r}".format(uri)
)
#!/usr/bin/env kross
import urllib, Kross, KSpread
T = Kross.module("kdetranslation")
class MyConfig:
def __init__(self):
self.url = "http://127.0.0.1:20433"
self.sheetRange = "A1:F50"
self.cellNameOnSelectionChanged = True
self.cellValueOnSelectionChanged = True #TODO
self.sheetNameOnSheetChanged = True
class MyOrca:
def __init__(self, config):
self.config = config
def _send(self, data):
f = urllib.urlopen(self.config.url, data)
s = f.read()
f.close()
return s
def speak(self, text):
self._send("speak:%s" % text)
def stop(self):