Changed: func.spec
Deleted: func-0.24.tar.gz/func/codes.py
@@ -1,26 +0,0 @@
-#!/usr/bin/python
-"""
-func
-
-Copyright 2007, Red Hat, Inc
-See AUTHORS
-
-This software may be freely redistributed under the terms of the GNU
-general public license.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-"""
-
-import exceptions
-
-
-class FuncException(exceptions.Exception):
- pass
-
-
-class InvalidMethodException(FuncException):
- pass
-
-# FIXME: more sub-exceptions maybe
Deleted: func-0.24.tar.gz/func/minion/modules/meta.py
@@ -1,26 +0,0 @@
-#
-# Copyright 2008
-# Adrian Likins <alikins@redhat.com> <alikins@redhat.com>
-#
-# This software may be freely redistributed under the terms of the GNU
-# general public license.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-import func_module
-
-
-class Meta(func_module.FuncModule):
-
- # Update these if need be.
- version = "0.0.1"
- api_version = "0.0.1"
- description = "Describes meta information about func itself"
-
- def inventory(self):
- """
- TODO: Document me ...
- """
- pass
Deleted: func-0.24.tar.gz/version
@@ -1 +0,0 @@
-0.24 1
Changed: func-0.28.tar.bz2/AUTHORS
@@ -10,8 +10,10 @@
Krzysztof A. Adamski <krzysztofa@gmail.com>
James Anthill <james@mail.and.org>
+ Tim Bielawa <timbielawa@gmail.com>
James Bowes <jbowes@redhat.com>
Pádraig Brady <P@draigBrady.com>
+ James Cammarata <jimi@sngx.net>
Jasper Capel <capel@stone-it.com>
Louis Coilliot <louis.coilliot@gmail.com>
Eli Criffield <elicriffield@gmail.com>
@@ -19,19 +21,27 @@
Luca Foppiano <lfoppiano@byte-code.com>
Matt Hicks <mhicks@redhat.com>
Scott Henson <shenson@redhat.com>
+ Léon Keijser <keijser@stone-it.com>
Tanabe Ken-ichi <nabeken@tknetworks.org>
Pradeep Kilambi <pkilambi@redhat.com>
Denis Kurov <makkalot@gmail.com>
Vito Laurenza <vitolaurenza@gmail.com>
Brenton Leanhardt <bleanhar@redhat.com>
Luke Macken <lmacken@redhat.com>
+ Gregory Masseau <gjmasseau@learn.senecac.on.ca>
Steve Milner <smilner@redhat.com>
Marco Mornati <mmornati@byte-code.com>
Stephen Nelson-Smith <atalanta.systems@googlemail.com>
Robin Norwood <rnorwood@redhat.com>
+ Milton Paiva Neto <milton.paiva@gmail.com>
+ Simone Pucci <spucci@byte-code.com>
Dan Radez <dradez@redhat.com>
+ Jesus M. Rodriguez <jmrodri@redhat.com>
Steve Salevan <ssalevan@redhat.com>
Silas Sewell <silas@sewell.ch>
+ Wouter Spee <wouter@spilgames.com>
+ Greg Swift <gregswift@gmail.com>
+ Nima Talebi <nima@it.net.au>
Al Tobey <tobert@gmail.com>
Tim Verhoeven <tim.verhoeven.be@gmail.com>
Alex Wood <awood@redhat.com>
Added: func-0.28.tar.bz2/CHANGES
@@ -0,0 +1,139 @@
+Changes in 0.25
+
+bugs fixed:
+
+
+ https://fedorahosted.org/func/ticket/76:
+ umask not set correctly
+ (fixed, change in certmaster/utils.py)
+
+ https://fedorahosted.org/func/ticket/72:
+ cli timeout needed
+
+ https://fedorahosted.org/func/ticket/70:
+ func/certmaster need a exception handler to catch and log exceptions
+
+ https://fedorahosted.org/func/ticket/75:
+ func --help as non-root gives traceback
+
+ https://fedorahosted.org/func/ticket/74:
+ Need permissions on logfile to run func --help
+
+ https://fedorahosted.org/func/ticket/69:
+ Incorrect homepage field in setup.py
+
+ some untracked bug fixes:
 - init script lsb functions weren't returning proper exit codes
 - more changes to how a minion finds its hostname
+ - remove the use of the "version" file in the build to
+ be a bit friendlier to the automated fedora build tools
+ - remove #! paths from most stuff except what needs it
+ - Fixed bridge module listing issue when vifX.Y interfaces were present in brctl show
+
+Features added:
+ - pullfile module added by Léon Keijser
+ - add some subgroup capabilities to the grouping functionality
+ - add support for --timeout command line option and socket_timeout
+ option to /etc/func/overlord.conf
+ - add /etc/func/overlord.conf
+ - add "minion_name" config option to specify the hostname the
+ minion should use
+ - rpm module now has a "verify" method, (Milton Paiva Neto)
+ - func --help sh
+ - add "minion-to-minion" support (see https://fedorahosted.org/func/wiki/MinionToMinion)
+ - add --basic support to "func call"
+ - support for setting up vnc in the virt module
+ - support for getting/setting libvirt xml directly (Simone Pucci)
+ - man pages for all executables now (Nima Talebi, Steve Salevan)
+ - include a monit config file
+ - loadavg method for process module
+ - users.py unix user and group module (Gregory Masseau)
+ - add an augeas module (Louis Coilliot)
+ - add an httpd module (John Eckersberg)
+ - add bridge and vlan modules (Jasper Capel)
+ - add some support for rhel3 [using python2.3] (Greg Swift)
+ - if we get a cert request that doesn't match the current key, throw
+ a more useful error message
+
+
+Misc Changes:
+
+
+
+New Contributors
+ Léon Keijser, Milton Paiva Neto, Wouter Spee, James Cammarata,
+ Simone Pucci, Nima Talebi, Gregory Masseau, Greg Swift
+
+
+
+Changes in 0.24
+
+
+bugs fixed:
+ https://fedorahosted.org/func/ticket/67:
+ func passes open fd's to child process
+
+ https://fedorahosted.org/func/ticket/31:
+ ports are hardcoded throughout the app
+
+ ports are now configurable, see https://fedorahosted.org/func/wiki/PortInfo
+
+ https://fedorahosted.org/func/ticket/65:
+ logrotate remove acls every week
+
+ (patch from lfoppiano)
+
+ https://fedorahosted.org/func/ticket/66:
+ Init script does not run properly in debian/ubuntu
+
+ (patch from tbielawa)
+
+ https://fedorahosted.org/func/ticket/60:
+ ValueError: substring not found' traceback caused by certain minions
+
+ (fixed with changes to certmaster in certmaster-0.24)
+
+ https://fedorahosted.org/func/ticket/61:
+ "show" sub command fails/ module_loader needs to load multiple classes per module
+
+ https://fedorahosted.org/func/ticket/32:
+ Copyfile implementation needs improvements for large files.
+
+ (patch from kadamski)
+
+
+ https://bugzilla.redhat.com/show_bug.cgi?id=474644:
+ unowned directories in func spec
+
+ Some untracked bug fixes:
+ - func-transmit sometimes sends bools as strings, so work around it
+ - init scripts use proper $DAEMON invocation
+ - Client api was failing when init'ed with ASync=None
+ - jobthing.py cleanups
+ - fixed "local" client api namespace
+ - minion module config usage cleanup, tested and documented
+ - updates/fixes to bridge/vlan modules
+ - init script lsb functions weren't returning proper exit codes
+ - more changes to how a minion finds its hostname
+
+
+Features added:
+
+ - command.run changes to allow shell wildcards and redirects, test cases added
+ - ports that func/certmaster run on are now configurable
+ - filetracker module now supports globs
+ - func_getargs.py module added for more introspection support
+ - system.inventory() method that returns a list of all available modules/methods for use
+ with func-inventory
+ - djangoctrl module added by Steve Milner
+ - pullfile module added by Léon Keijser
+ - add some subgroup capabilities to the grouping functionality
+
+
+other misc changes
+ - lots of new test cases, support for running code coverage for unit tests
+ - lots of code cleanup
+ - some new docs (sslInfo, PortInfo, AixSetupGuide)
+
+New Contributors
+ Silas Sewell, Louis Coilliot, Jasper Capel, Léon Keijser, Milton Paiva Neto
Changed: func-0.28.tar.bz2/PKG-INFO
@@ -1,8 +1,8 @@
Metadata-Version: 1.0
Name: func
-Version: 0.24
+Version: 0.28
Summary: func remote configuration and management api
-Home-page: https://hosted.fedoraproject.org/projects/func/
+Home-page: https://fedorahosted.org/func/
Author: Lots
Author-email: func-list@redhat.com
License: GPL
Changed: func-0.28.tar.bz2/README
@@ -1,4 +1,4 @@
-func - Fedora unified Network Controller
+func - Fedora Unified Network Controller
https://fedorahosted.org/func
Added: func-0.28.tar.bz2/docs/confmgt_augeas-doc.txt
@@ -0,0 +1,95 @@
+Here is a brief overview of using the module confmgt_augeas for func.
+
+This module relies on Augeas (http://augeas.net).
+Augeas is a configuration API for handling (set, get, list...) the parameters of configuration files on the func minions.
+
+It is inspired by the 'Quick tour' of Augeas (http://augeas.net/tour.html)
+
+Below, the guinea pig minion is named 'kermit'.
+
+- Set a parameter in a configuration file: i.e. PermitRootLogin yes in sshd_config
+func 'kermit' call confmgt_augeas set '/etc/ssh/sshd_config' 'PermitRootLogin' 'yes'
+
+{'kermit': {'parameter': 'PermitRootLogin',
+ 'path': '/etc/ssh/sshd_config',
+ 'value': 'yes'}
+}
+
+The arguments are config. file, parameter and value.
+
+
+- Get a parameter in a configuration file: i.e. port in sshd_config
+func 'kermit' call confmgt_augeas get '/etc/ssh/sshd_config' 'Port'
+
+The arguments are config. file and parameter.
+
+
+{'kermit': {'parameter': 'Port',
+ 'path': '/etc/ssh/sshd_config',
+ 'value': '22'}
+}
+
+
+Most actions involve a file, a parameter, and a value.
+But Augeas purists would perhaps prefer using the very graceful path-like syntax of Augeas.
+They can do so:
+func 'kermit' call confmgt_augeas get '/etc/ssh/sshd_config/Port'
+
+{'kermit': {'parameter': 'Port',
+ 'path': '/etc/ssh/sshd_config',
+ 'value': '22'}
+}
+
+
+
+- Make sshd accept an additional environment variable
+The example is in python this time.
+
+In sshd_config some settings can be repeated in the file, and values are accumulated. These values are best viewed as arrays.
+
+To illustrate this, we will add a new environment variable FOO to the AcceptEnv setting in /etc/ssh/sshd_config.
+
+These values are mapped into a tree (see http://augeas.net for more details on augeas schemas, tree and path expressions).
+
+import func.overlord.client as fc
+c = fc.Client("kermit")
+print c.confmgt_augeas.printconf('/etc/ssh/sshd_config/AcceptEnv')
+
+If sshd_config on minion 'kermit' contains:
+
+AcceptEnv LANG LC_CTYPE
+AcceptEnv LC_IDENTIFICATION LC_ALL
+
+You'll get:
+
+{'kermit':
+ {'path': '/etc/ssh/sshd_config/AcceptEnv',
+ 'nodes':
+ [
+ ['/etc/ssh/sshd_config/AcceptEnv', '(none)'],
+ ['/etc/ssh/sshd_config/AcceptEnv[1]/1', 'LANG'],
+ ['/etc/ssh/sshd_config/AcceptEnv[1]/2', 'LC_CTYPE'],
+ ['/etc/ssh/sshd_config/AcceptEnv[2]/3', 'LC_IDENTIFICATION'],
+ ['/etc/ssh/sshd_config/AcceptEnv[2]/4', 'LC_ALL'],
+ ]
+ }
+}
+
+To add a new variable FOO at the end of the last AcceptEnv line, we perform
+
+print c.confmgt_augeas.set('/etc/ssh/sshd_config/AcceptEnv[last()]','10000','FOO')
+
+Which gives:
+
+{'kermit': {'path': '/etc/ssh/sshd_config/AcceptEnv[last()]', 'parameter': '10000', 'value': 'FOO'}}
+
+After the action (on the target minion), sshd_config contains:
+
+AcceptEnv LANG LC_CTYPE
+AcceptEnv LC_IDENTIFICATION LC_ALL FOO
+
+The addition of [last()] to AcceptEnv in the path tells Augeas that we are talking about the last node named AcceptEnv. Augeas requires that for a set. The path expression corresponds either to an existing node, or to no node at all (in which case a new node is created).
+
+'10000' is 'very big' to be sure we add the value in last position.
+
+
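As a rough sketch of scripting the calls above from the overlord's Python API (the minion name 'kermit', the file paths and the option values are the same illustrative examples used in this document):

    # Sketch: set a value, then read one back, via the confmgt_augeas module.
    import func.overlord.client as fc

    client = fc.Client("kermit")
    # set(config_file, parameter, value)
    print client.confmgt_augeas.set('/etc/ssh/sshd_config', 'PermitRootLogin', 'yes')
    # get(config_file, parameter), or the single path-like argument shown above
    print client.confmgt_augeas.get('/etc/ssh/sshd_config', 'Port')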
Added: func-0.28.tar.bz2/docs/func-build-map.1.gz
@@ -0,0 +1,191 @@
+.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
+.\"
+.\" Standard preamble:
+.\" ========================================================================
+.de Sp \" Vertical space (when we can't use .PP)
+.if t .sp .5v
+.if n .sp
+..
+.de Vb \" Begin verbatim text
+.ft CW
+.nf
+.ne \\$1
+..
+.de Ve \" End verbatim text
+.ft R
+.fi
+..
+.\" Set up some character translations and predefined strings. \*(-- will
+.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
+.\" double quote, and \*(R" will give a right double quote. \*(C+ will
+.\" give a nicer C++. Capital omega is used to do unbreakable dashes and
+.\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff,
+.\" nothing in troff, for use with C<>.
+.tr \(*W-
+.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
+.ie n \{\
+. ds -- \(*W-
+. ds PI pi
+. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
+. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
+. ds L" ""
+. ds R" ""
+. ds C` ""
+. ds C' ""
+'br\}
+.el\{\
+. ds -- \|\(em\|
+. ds PI \(*p
+. ds L" ``
+. ds R" ''
+'br\}
+.\"
+.\" Escape single quotes in literal strings from groff's Unicode transform.
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\"
+.\" If the F register is turned on, we'll generate index entries on stderr for
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
+.\" entries marked with X<> in POD. Of course, you'll have to process the
+.\" output yourself in some meaningful fashion.
+.ie \nF \{\
+. de IX
+. tm Index:\\$1\t\\n%\t"\\$2"
+..
+. nr % 0
+. rr F
+.\}
+.el \{\
+. de IX
+..
+.\}
+.\"
+.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
+.\" Fear. Run. Save yourself. No user-serviceable parts.
+. \" fudge factors for nroff and troff
+.if n \{\
+. ds #H 0
+. ds #V .8m
+. ds #F .3m
+. ds #[ \f1
+. ds #] \fP
+.\}
+.if t \{\
+. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
+. ds #V .6m
+. ds #F 0
+. ds #[ \&
+. ds #] \&
+.\}
+. \" simple accents for nroff and troff
+.if n \{\
+. ds ' \&
+. ds ` \&
+. ds ^ \&
+. ds , \&
+. ds ~ ~
+. ds /
+.\}
+.if t \{\
+. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
+. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
+. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
+. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
+. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
+. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
+.\}
+. \" troff and (daisy-wheel) nroff accents
+.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
+.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
+.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
+.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
+.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
+.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
+.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
+.ds ae a\h'-(\w'a'u*4/10)'e
+.ds Ae A\h'-(\w'A'u*4/10)'E
+. \" corrections for vroff
+.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
+.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
+. \" for low resolution devices (crt and lpr)
+.if \n(.H>23 .if \n(.V>19 \
+\{\
+. ds : e
+. ds 8 ss
+. ds o a
+. ds d- d\h'-1'\(ga
+. ds D- D\h'-1'\(hy
+. ds th \o'bp'
+. ds Th \o'LP'
+. ds ae ae
+. ds Ae AE
+.\}
+.rm #[ #] #H #V #F C
+.\" ========================================================================
+.\"
+.IX Title "FUNC-BUILD-MAP 1"
+.TH FUNC-BUILD-MAP 1 "2009-03-13" "" "func-build-map"
+.\" For nroff, turn off justification. Always turn off hyphenation; it makes
+.\" way too many mistakes in technical documents.
+.if n .ad l
+.nh
+.SH "NAME"
+func\-build\-map \-\- Builds/augments delegation map of all available func minions
+.SH "SYNOPSIS"
+.IX Header "SYNOPSIS"
+func-build-map [\-a|\-\-append] [\-o|\-\-onlyalive] [\-v|\-\-verbose]
+.SH "DESCRIPTION"
+.IX Header "DESCRIPTION"
+Func's delegation feature allows an overlord to execute commands through proxies (minions which also run as overlords) to reduce \s-1XMLRPC\s0 overhead and the amount of time required to execute commands over large Func networks.
+.PP
+To accomplish this task, Func needs to know where each proxy and minion sits in the Func network, and for the sake of expediency, this data is stored within a map file on the overlord.
+.PP
+func-build-map, when run on an overlord, recursively probes through one's Func network, discovering its topology. When complete, it stores a mapfile, encoded in \s-1YAML\s0, in /var/lib/func/map.
+.PP
+If you utilize delegation frequently, we recommend running this tool as a cron job to ensure that your mapfile remains up to date with changes in your Func network topology.
+.SH "DELEGATION"
+.IX Header "DELEGATION"
+Run without arguments, func-build-map will rewrite any mapfile currently sitting
+in /var/lib/func.
+.PP
+To delegate commands, this mapfile must be created and kept updated. It is hence
+recommended to run this as a daily cron job on the highest overlord.
+.PP
+Note: Minions not yet in the map file will not be reached by delegation calls.
+.SH "\-a, \-\-append"
+.IX Header "-a, --append"
+Combines new Func network topology data with data from the old map file, performing a tree merge. If this merge fails, it replaces the old mapfile with a fresh version.
+.SH "\-o, \-\-onlyalive"
+.IX Header "-o, --onlyalive"
+Pings all minions and proxies and returns a map containing those which return those pings. Useful for Func network diagnostics and for those who maintain networks where minions are frequently switched on and off.
+.SH "\-v, \-\-verbose"
+.IX Header "-v, --verbose"
+Gives additional information as to what func-build-map is doing
+.SH "DELEGATES"
+.IX Header "DELEGATES"
+To get started with delegation via the Python \s-1API\s0, try the following code:
+.PP
+.Vb 2
+\& import func.overlord.client as fc
+\& my_overlord = fc.Overlord("<your glob>", delegate=True)
+.Ve
+.PP
+If you want to use an alternative delegation map file, you can add the argument
+mapfile=<your mapfile location> to the Overlord constructor to tell it to pull
+the mapping data out of it instead.
+.PP
+From this point, you can treat your delegating overlord object in the same
+manner as a non-delegating overlord. Minions that exist under multiple layers
+of overlords will appear as if they existed directly beneath your master
+overlord, so make some function calls and play around with it!
+.SH "ADDITIONAL RESOURCES"
+.IX Header "ADDITIONAL RESOURCES"
+See https://fedorahosted.org/func/ for more information.
+.PP
+For feature-specific info, visit https://fedorahosted.org/func/wiki/DelegationModule.
+.SH "SEE ALSO"
+.IX Header "SEE ALSO"
+\&\fIfunc\-create\-module\fR\|(1), \fIfunc\-transmit\fR\|(1), \fIfunc\-inventory\fR\|(1), \fIfunc\fR\|(1), \fIfuncd\fR\|(1).
+.SH "AUTHOR"
+.IX Header "AUTHOR"
+Steve Salevan <ssalevan@redhat.com>, Nima Talebi <nima@it.net.au>
Added: func-0.28.tar.bz2/docs/func-build-map.pod
@@ -0,0 +1,70 @@
+=head1 NAME
+
+func-build-map -- Builds/augments delegation map of all available func minions
+
+=head1 SYNOPSIS
+
+func-build-map [-a|--append] [-o|--onlyalive] [-v|--verbose]
+
+=head1 DESCRIPTION
+
+Func's delegation feature allows an overlord to execute commands through proxies (minions which also run as overlords) to reduce XMLRPC overhead and the amount of time required to execute commands over large Func networks.
+
+To accomplish this task, Func needs to know where each proxy and minion sits in the Func network, and for the sake of expediency, this data is stored within a map file on the overlord.
+
+func-build-map, when run on an overlord, recursively probes through one's Func network, discovering its topology. When complete, it stores a mapfile, encoded in YAML, in /var/lib/func/map.
+
+If you utilize delegation frequently, we recommend running this tool as a cron job to ensure that your mapfile remains up to date with changes in your Func network topology.
+
+=head1 DELEGATION
+
+Run without arguments, func-build-map will rewrite any mapfile currently sitting
+in /var/lib/func.
+
+To delegate commands, this mapfile must be created and kept updated. It is hence
+recommended to run this as a daily cron job on the highest overlord.
+
+Note: Minions not yet in the map file will not be reached by delegation calls.
+
+
+=head1 -a, --append
+
+Combines new Func network topology data with data from the old map file, performing a tree merge. If this merge fails, it replaces the old mapfile with a fresh version.
+
+=head1 -o, --onlyalive
+
+Pings all minions and proxies and returns a map containing those which return those pings. Useful for Func network diagnostics and for those who maintain networks where minions are frequently switched on and off.
+
+=head1 -v, --verbose
+
+Gives additional information as to what func-build-map is doing
+
+=head1 DELEGATES
+
+To get started with delegation via the Python API, try the following code:
+
+ import func.overlord.client as fc
+ my_overlord = fc.Overlord("<your glob>", delegate=True)
+
+If you want to use an alternative delegation map file, you can add the argument
+mapfile=<your mapfile location> to the Overlord constructor to tell it to pull
+the mapping data out of it instead.
+
+From this point, you can treat your delegating overlord object in the same
+manner as a non-delegating overlord. Minions that exist under multiple layers
+of overlords will appear as if they existed directly beneath your master
+overlord, so make some function calls and play around with it!
+
+=head1 ADDITIONAL RESOURCES
+
+See https://fedorahosted.org/func/ for more information.
+
+For feature-specific info, visit https://fedorahosted.org/func/wiki/DelegationModule.
+
+=head1 SEE ALSO
+
+func-create-module(1), func-transmit(1), func-inventory(1), func(1), funcd(1).
+
+=head1 AUTHOR
+
+Steve Salevan <ssalevan@redhat.com>, Nima Talebi <nima@it.net.au>
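A minimal sketch of a delegated call once the map exists; the glob "*" and the hardware.info method are placeholders for whatever you actually want to run:

    # Sketch: call through proxies using the map built by func-build-map.
    import func.overlord.client as fc

    my_overlord = fc.Overlord("*", delegate=True)   # reads /var/lib/func/map by default
    results = my_overlord.hardware.info()           # nested minions appear as direct minions
    for minion_name in sorted(results.keys()):
        print minion_name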
Added: func-0.28.tar.bz2/docs/func-create-module.1.gz
Changed: func-0.28.tar.bz2/docs/func-create-module.pod
Changed: func-0.28.tar.bz2/docs/func-inventory.1.gz
@@ -1,15 +1,7 @@
-.\" Automatically generated by Pod::Man 2.16 (Pod::Simple 3.07)
+.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
.\"
.\" Standard preamble:
.\" ========================================================================
-.de Sh \" Subsection heading
-.br
-.if t .Sp
-.ne 5
-.PP
-\fB\\$1\fR
-.PP
-..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
@@ -53,7 +45,7 @@
.el .ds Aq '
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
-.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.ie \nF \{\
@@ -132,7 +124,7 @@
.\" ========================================================================
.\"
.IX Title "FUNC-INVENTORY 1"
-.TH FUNC-INVENTORY 1 "2008-07-29" "" "func-inventory"
+.TH FUNC-INVENTORY 1 "2009-03-04" "" "func-inventory"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -185,7 +177,7 @@
\&\s-1RPM\s0 be installed.
.SH "ADDITONAL RESOURCES"
.IX Header "ADDITONAL RESOURCES"
-See https://hosted.fedoraproject.org/projects/func/ for more information.
+See https://fedorahosted.org/func/ for more information.
.PP
See also the manpages for \*(L"func\*(R", \*(L"funcd\*(R", \*(L"certmaster\*(R", \*(L"certmaster-ca\*(R", and \*(L"func-transmit\*(R".
.SH "AUTHOR"
Changed: func-0.28.tar.bz2/docs/func-inventory.pod
@@ -58,7 +58,7 @@
=head1 ADDITONAL RESOURCES
-See https://hosted.fedoraproject.org/projects/func/ for more information.
+See https://fedorahosted.org/func/ for more information.
See also the manpages for "func", "funcd", "certmaster", "certmaster-ca", and "func-transmit".
Changed: func-0.28.tar.bz2/docs/func-transmit.1.gz
@@ -1,15 +1,7 @@
-.\" Automatically generated by Pod::Man 2.16 (Pod::Simple 3.07)
+.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
.\"
.\" Standard preamble:
.\" ========================================================================
-.de Sh \" Subsection heading
-.br
-.if t .Sp
-.ne 5
-.PP
-\fB\\$1\fR
-.PP
-..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
@@ -53,7 +45,7 @@
.el .ds Aq '
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
-.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.ie \nF \{\
@@ -132,7 +124,7 @@
.\" ========================================================================
.\"
.IX Title "FUNC-TRANSMIT 1"
-.TH FUNC-TRANSMIT 1 "2008-11-16" "" "func-transmit"
+.TH FUNC-TRANSMIT 1 "2009-03-04" "" "func-transmit"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -161,9 +153,9 @@
Func-transmit will exit after every command issued to the overlord, it does not await further input on the pipes.
.SH "ADDITONAL RESOURCES"
.IX Header "ADDITONAL RESOURCES"
-See https://hosted.fedoraproject.org/projects/func/ for more information.
+See https://fedorahosted.org/func/ for more information.
.PP
See also the manpages for \*(L"func-inventory\*(R", \*(L"funcd\*(R", \*(L"certmaster\*(R", and \*(L"certmaster-ca\*(R".
.SH "AUTHOR"
.IX Header "AUTHOR"
-Various. See https://hosted.fedoraproject.org/projects/func
+Various. See https://fedorahosted.org/func/
Changed: func-0.28.tar.bz2/docs/func-transmit.pod
@@ -28,12 +28,12 @@
=head1 ADDITONAL RESOURCES
-See https://hosted.fedoraproject.org/projects/func/ for more information.
+See https://fedorahosted.org/func/ for more information.
See also the manpages for "func-inventory", "funcd", "certmaster", and "certmaster-ca".
=head1 AUTHOR
-Various. See https://hosted.fedoraproject.org/projects/func
+Various. See https://fedorahosted.org/func/
Changed: func-0.28.tar.bz2/docs/func.1.gz
@@ -1,15 +1,7 @@
-.\" Automatically generated by Pod::Man 2.16 (Pod::Simple 3.07)
+.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
.\"
.\" Standard preamble:
.\" ========================================================================
-.de Sh \" Subsection heading
-.br
-.if t .Sp
-.ne 5
-.PP
-\fB\\$1\fR
-.PP
-..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
@@ -53,7 +45,7 @@
.el .ds Aq '
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
-.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.ie \nF \{\
@@ -132,7 +124,7 @@
.\" ========================================================================
.\"
.IX Title "FUNC 1"
-.TH FUNC 1 "2008-07-29" "" "func"
+.TH FUNC 1 "2009-03-04" "" "func"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -230,9 +222,9 @@
called. See the project page (linked below) for more information.
.SH "ADDITONAL RESOURCES"
.IX Header "ADDITONAL RESOURCES"
-See https://hosted.fedoraproject.org/projects/func/ for more information, including information on scripting func from Python.
+See https://fedorahosted.org/func/ for more information, including information on scripting func from Python.
.PP
See also the manpages for \*(L"func-inventory\*(R", \*(L"funcd\*(R", \*(L"certmaster\*(R", \*(L"certmaster-ca\*(R", and \*(L"func-transmit\*(R".
.SH "AUTHOR"
.IX Header "AUTHOR"
-Various. See https://hosted.fedoraproject.org/projects/func
+Various. See https://fedorahosted.org/func/
Changed: func-0.28.tar.bz2/docs/func.pod
@@ -103,12 +103,12 @@
=head1 ADDITONAL RESOURCES
-See https://hosted.fedoraproject.org/projects/func/ for more information, including information on scripting func from Python.
+See https://fedorahosted.org/func/ for more information, including information on scripting func from Python.
See also the manpages for "func-inventory", "funcd", "certmaster", "certmaster-ca", and "func-transmit".
=head1 AUTHOR
-Various. See https://hosted.fedoraproject.org/projects/func
+Various. See https://fedorahosted.org/func/
Changed: func-0.28.tar.bz2/docs/funcd.1.gz
@@ -1,15 +1,7 @@
-.\" Automatically generated by Pod::Man 2.16 (Pod::Simple 3.07)
+.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
.\"
.\" Standard preamble:
.\" ========================================================================
-.de Sh \" Subsection heading
-.br
-.if t .Sp
-.ne 5
-.PP
-\fB\\$1\fR
-.PP
-..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
@@ -53,7 +45,7 @@
.el .ds Aq '
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
-.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
+.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.ie \nF \{\
@@ -132,13 +124,13 @@
.\" ========================================================================
.\"
.IX Title "FUNCD 1"
-.TH FUNCD 1 "2008-11-16" "" "funcd"
+.TH FUNCD 1 "2009-03-04" "" "funcd"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
.nh
.SH "NAME"
-\&\fBfuncd\fR \- deaemon for the Fedora Universal Network Controller
+funcd \- deaemon for the Fedora Universal Network Controller
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.IP "funcd [\-\-daemon]" 4
@@ -151,7 +143,7 @@
.IX Header "DESCRIPTION"
funcd registers itself to a certificate server (certmaster) listed in /etc/certmaster/minion.conf and takes orders from the command line func when that program is run from that certificate server.
.PP
-Modules and capabilities provided by funcd are specified at https://hosted.fedoraproject.org/projects/func/
+Modules and capabilities provided by funcd are specified at https://fedorahosted.org/func/
.SH "OPTIONS"
.IX Header "OPTIONS"
.IP "\fB\-\-daemon\fR daemonize the server on startup" 16
@@ -190,16 +182,10 @@
.PD
.SH "ADDITONAL RESOURCES"
.IX Header "ADDITONAL RESOURCES"
-See the project wiki at https://hosted.fedoraproject.org/projects/func/
+See the project wiki at https://fedorahosted.org/func/
.SH "SEE ALSO"
.IX Header "SEE ALSO"
-\&\fIfunc\fR\|(1), \fIcertmaster\fR\|(1), \fIcertmaster\-ca\fR\|(1), \fIfunc\-transmit\fR\|(1)
+\&\f(CWfunc(1)\fR, \f(CWcertmaster(1)\fR, \f(CW\*(C`certmaster\-ca(1)\*(C'\fR, \f(CW\*(C`func\-transmit(1)\*(C'\fR
.SH "AUTHOR"
.IX Header "AUTHOR"
-Various, see https://hosted.fedoraproject.org/projects/func
-.SH "POD ERRORS"
-.IX Header "POD ERRORS"
-Hey! \fBThe above document had some coding errors, which are explained below:\fR
-.IP "Around line 61:" 4
-.IX Item "Around line 61:"
-You forgot a '=back' before '=head1'
+Various, see https://fedorahosted.org/func/
Changed: func-0.28.tar.bz2/docs/funcd.pod
@@ -16,7 +16,7 @@
funcd registers itself to a certificate server (certmaster) listed in /etc/certmaster/minion.conf and takes orders from the command line func when that program is run from that certificate server.
-Modules and capabilities provided by funcd are specified at https://hosted.fedoraproject.org/projects/func/
+Modules and capabilities provided by funcd are specified at https://fedorahosted.org/func/
=head1 OPTIONS
@@ -58,16 +58,18 @@
=item B</etc/func/minion-acl.d/> directory for ACL files
+=back
+
=head1 ADDITONAL RESOURCES
-See the project wiki at https://hosted.fedoraproject.org/projects/func/
+See the project wiki at https://fedorahosted.org/func/
=head1 SEE ALSO
-L<func(1)>, L<certmaster(1)>, L<certmaster-ca(1)>, L<func-transmit(1)>
+C<func(1)>, C<certmaster(1)>, C<certmaster-ca(1)>, C<func-transmit(1)>
=head1 AUTHOR
-Various, see https://hosted.fedoraproject.org/projects/func
+Various, see https://fedorahosted.org/func/
Added: func-0.28.tar.bz2/docs/release_process.txt
@@ -0,0 +1,84 @@
+This is just an attempt to note what a release needs, mainly
+so I don't forget.
+
+
+- test the tree
+ - testit.sh/unittests
+
+- rev the version (version is set in setup.py, func.spec, and the
+Makefile). I really hate having this in three places, but my previous
+approach (a 'version' file) was frowned upon by the fedora build system.
+
+- tag the git tree for the release
+ git tag -a release_name
+
+ release name is of the form v0.25
+
+ see "git tag -l" for examples of existing tags
+
+ probably want to wait until you know the latter steps
+ work correctly before tagging.
+
+- push the tags upstream
+ - by default git doesn't sync tags on push, so:
+
+ git push --tags
+
+
+- build a tarball (I currently just use "make" or "make rpms"
+and use the tarball from rpm-build/)
+
+- make a checksum of the tarball
+ for example:
+ sha256sum tarball > tarball.sha256
+
+- write up the release notes
+ More is better. See "git log" and func trace timeline for
+ a start.
+
+- push the tarball and checksum files to its upstream home. Currently it's at
+ http://people.fedoraproject.org/~alikins/files/func/
+
+ But it can move if need be. Just update the trac wiki Releases page.
+
+- build the rpms for fedora/epel
+
+ I mostly use the "pusher.py" script for this, as it automates
+ most of this somewhat tedious process.
+
+ There is a lot of setup needed before it will run correctly
+ however. You need to get a fedora package maintainer setup
+ up and running. The fedora devel docs go into more detail
+ about this.
+
+ See:
+ http://fedoraproject.org/wiki/PackageMaintainers/Join
+
+
+ Check the PROCESS_RELEASES variable in pusher.py. This defines
+ what releases to build packages for. You may need to update
+ this if new releases are available as targets since the last
+ time it was built.
+
+ Run it like:
+
+ ./pusher.py --cvs ~/cvs/func --prof ~/src/func
+
+ --cvs points to your checkout of the fedora "dist-cvs" tree
+ for func
+
+ Currently that is cvsroot :ext:USERNAME@cvs.fedoraproject.org:/cvs/pkgs
+
+ See http://fedoraproject.org/wiki/Using_Fedora_CVS
+
+ --proj just points to a git checkout of the project that you
+ have built the rpms for (pusher.py assume PROJECT/rpm-build exists
+ and has been populated)
+
+- once the packages have been built, they need to be pushed into the
+fedora update system
+
+- EPEL packages are mostly pulled automatically if they have been built for
+EPEL with "plague" (pusher.py builds for this automatically if a rhel target has been put into "PROCESS_RELEASES" in pusher.py).
+
+
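For the checksum step, a small Python equivalent of the sha256sum command above (the tarball name is only an example):

    # Sketch: write "<digest>  <filename>" to tarball.sha256, like sha256sum does.
    import hashlib

    tarball = "func-0.28.tar.gz"   # example name
    digest = hashlib.sha256(open(tarball, "rb").read()).hexdigest()
    open(tarball + ".sha256", "w").write("%s  %s\n" % (digest, tarball))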
Added: func-0.28.tar.bz2/etc/Bridge.conf
@@ -0,0 +1,6 @@
+[main]
+ignorebridges = virbr0
+brctl = /usr/sbin/brctl
+ip = /sbin/ip
+ifup = /sbin/ifup
+ifdown = /sbin/ifdown
Changed: func-0.28.tar.bz2/etc/Test.conf
@@ -2,7 +2,7 @@
[main]
example = 1
int_option = 37
-bool_option = True
+bool_option = true
float_option = 3.14159
string_option = "this is a string"
testvalue = 100
Added: func-0.28.tar.bz2/etc/Vlan.conf
@@ -0,0 +1,6 @@
+[main]
+ignorevlans =
+vconfig = /sbin/vconfig
+ip = /sbin/ip
+ifup = /sbin/ifup
+ifdown = /sbin/ifdown
Changed: func-0.28.tar.bz2/etc/func_rotate
@@ -5,7 +5,7 @@
weekly
postrotate
if [ -f /var/lock/subsys/funcd ]; then
- /etc/init.d/funcd condrestart
+ /etc/init.d/funcd condrestart > /dev/null
fi
endscript
}
Changed: func-0.28.tar.bz2/etc/minion.conf
@@ -1,8 +1,10 @@
# configuration for minions
[main]
-log_level = DEBUG
+log_level = INFO
acl_dir = /etc/func/minion-acl.d
listen_addr =
listen_port = 51234
+minion_name =
+method_log_dir = /var/log/func/methods/
Added: func-0.28.tar.bz2/etc/overlord.conf
@@ -0,0 +1,7 @@
+# configuration for overlord
+
+[main]
+socket_timeout = 0
+backend = conf
+group_db =
+delegate = False
Added: func-0.28.tar.bz2/etc/version
@@ -0,0 +1,5 @@
+version: 0.28
+release: 1
+source build date: Thu Apr 7 17:18:46 EDT 2011
+git commit: 73b692f57a5e56237a0c8d87726acba724d2464c
+git date: Thu Apr 7 13:40:15 2011 -0400
Added: func-0.28.tar.bz2/etc/version~
@@ -0,0 +1,5 @@
+version: 0.27.1
+release: 1
+source build date: Fri Feb 25 16:20:47 EST 2011
+git commit: 3d39047ea3e38cb644e0ca608d0415652dafde56
+git date: Fri Feb 25 15:53:01 2011 -0500
Changed: func-0.28.tar.bz2/func/CommonErrors.py
@@ -66,4 +66,3 @@
self.value = value
def __str__(self):
return "%s" %(self.value,)
-
Changed: func-0.28.tar.bz2/func/commonconfig.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+
"""
func
@@ -14,8 +14,10 @@
"""
-from certmaster.config import BaseConfig, BoolOption, Option, IntOption
+from certmaster.config import BaseConfig, BoolOption, Option, IntOption, FloatOption, ListOption
+FUNCD_CONFIG_FILE="/etc/func/minion.conf"
+OVERLORD_CONFIG_FILE="/etc/func/overlord.conf"
class FuncdConfig(BaseConfig):
log_level = Option('INFO')
@@ -24,3 +26,29 @@
listen_addr = Option('')
listen_port = IntOption('51234')
+ minion_name = Option('')
+
+ method_log_dir = Option("/var/log/func/methods/")
+ use_certmaster = BoolOption(True)
+ ca_file = Option('')
+ cert_file = Option('')
+ key_file = Option('')
+ crl_location = Option('')
+ module_list = ListOption([])
+
+
+class OverlordConfig(BaseConfig):
+ socket_timeout = FloatOption(0)
+ listen_port = IntOption('51234')
+ backend = Option('conf')
+ group_db = Option('')
+ key_file = Option('')
+ cert_file = Option('')
+ ca_file = Option('')
+ delegate = BoolOption(False)
+ puppet_minions = BoolOption(False)
+ puppet_inventory = Option('/var/lib/puppet/ssl/ca/inventory.txt')
+ puppet_signed_certs_dir = Option('/var/lib/puppet/ssl/ca/signed')
+ puppet_crl = Option('/var/lib/puppet/ssl/ca/ca_crl.pem')
+ host_down_list = Option('/var/lib/func/hosts_down.lst')
+ allow_unknown_minions = BoolOption(False)
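A minimal sketch of reading the new OverlordConfig, following the read_config() pattern the minion side already uses in func/logger.py:

    # Sketch: load /etc/func/overlord.conf into the OverlordConfig class above.
    from certmaster.config import read_config
    from func.commonconfig import OverlordConfig, OVERLORD_CONFIG_FILE

    config = read_config(OVERLORD_CONFIG_FILE, OverlordConfig)
    print config.socket_timeout, config.backend, config.delegate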
Changed: func-0.28.tar.bz2/func/forkbomb.py
@@ -20,9 +20,10 @@
import sys
import tempfile
import fcntl
+from func import utils
DEFAULT_FORKS = 4
-DEFAULT_CACHE_DIR = "/var/lib/func"
+DEFAULT_CACHE_DIR = utils.getCacheDir()
def __get_storage(dir):
"""
@@ -31,7 +32,7 @@
dir = os.path.expanduser(dir)
if not os.path.exists(dir):
os.makedirs(dir)
- return tempfile.mktemp(suffix='', prefix='asynctmp', dir=dir)
+ return tempfile.mkstemp(suffix='', prefix='asynctmp', dir=dir)[1]
def __access_buckets(filename,clear,new_key=None,new_value=None):
"""
@@ -41,7 +42,7 @@
handle = open(filename,"w")
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
- internal_db = dbm.open(filename, 'c', 0644 )
+ internal_db = dbm.open(filename, 'c', 0600 )
storage = shelve.Shelf(internal_db)
if clear:
@@ -52,7 +53,7 @@
if not storage.has_key("data"):
storage["data"] = {}
- else:
+ else:
pass
if new_key is not None:
@@ -78,7 +79,7 @@
slot = count % slots
count = count + 1
if not buckets.has_key(slot):
- buckets[slot] = []
+ buckets[slot] = []
buckets[slot].append(key)
return buckets
@@ -111,11 +112,11 @@
else:
raise ose
else:
- __with_my_bucket(mybucket,buckets,what_to_do,filename)
+ __with_my_bucket(mybucket,buckets,what_to_do,filename)
os._exit(0)
def __demo(bucket_number, buckets, my_item):
- """
+ """
This is a demo handler for test purposes.
It just multiplies all numbers by 1000, but slowly.
"""
@@ -129,11 +130,11 @@
"""
Given an array of items (pool), call callback in each one, but divide
the workload over nfork forks. Temporary files used during the
- operation will be created in cachedir and subsequently deleted.
+ operation will be created in cachedir and subsequently deleted.
"""
if nforks < 1:
- # modulus voodoo gets crazy otherwise and bad things happen
- nforks = 1
+ # modulus voodoo gets crazy otherwise and bad things happen
+ nforks = 1
shelf_file = __get_storage(cachedir)
__access_buckets(shelf_file,True,None)
buckets = __bucketize(pool, nforks)
@@ -155,5 +156,3 @@
if __name__ == "__main__":
__test()
-
-
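For orientation, a sketch of calling batch_run() directly; the callback signature mirrors the __demo handler in this file and the workload itself is made up:

    # Sketch: run a trivial callback over a pool of items across 4 forks.
    from func import forkbomb

    def multiply(bucket_number, buckets, item):
        # invoked once per item, in whichever fork owns that item's bucket
        return item * 1000

    results = forkbomb.batch_run([1, 2, 3, 4, 5], multiply, 4)
    print results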
Added: func-0.28.tar.bz2/func/index_db.py
@@ -0,0 +1,143 @@
+import shelve
+import dbm
+import fcntl
+from func import utils
+
+INTERNAL_DB_FILE = "log_matcher"
+
+class IndexDb(object):
+ """
+ A simple wrapper for index Db,which
+ is a kind of pickle ...
+ """
+ WRITE_MODE = "w"
+ READ_MODE = "r"
+
+ def __init__(self,dir=None):
+ """
+ Load the db when have an instance
+ """
+ self.__storage = None
+ self.__handle = None
+ self.__dir = utils.getCacheDir()
+
+ def __load_index(self):
+ """
+ Gets the store object for that instance
+ """
+ import os
+ filename=os.path.join(self.__dir,INTERNAL_DB_FILE)
+ try:
+ self.__handle = open(filename,self.__mode)
+ except IOError, e:
+ print 'Cannot create status file. Ensure you have permission to write'
+ return False
+
+ fcntl.flock(self.__handle.fileno(), fcntl.LOCK_EX)
+ internal_db = dbm.open(filename, 'c', 0600 )
+ self.__storage = shelve.Shelf(internal_db)
+ return True
+
+ def write_to_index(self,write_dict):
+ """
+ Writes the dictonary into the index
+ """
+ self.__mode = self.WRITE_MODE
+ if not self.__storage:
+ self.__load_index()
+ try:
+ for key,value in write_dict.iteritems():
+ self.__storage[key]=value
+ except Exception,e:
+ print e
+ self.__storage = None
+ return False
+
+ self.__close_storage()
+ return True
+
+ def read_from_index(self):
+ """
+ Returns back a copy dict of the db
+ """
+ self.__mode = self.READ_MODE
+ if not self.__storage:
+ self.__load_index()
+
+ try:
+ tmp=dict(self.__storage)
+ except Exception,e:
+ print e
+ self.__storage = None
+ return None
+
+ self.__close_storage()
+ return tmp
+
+
+ def delete_from_index(self,delete_list):
+ """
+ Deletes a list of items from current store object
+ """
+ self.__mode = self.WRITE_MODE
+ if not self.__storage:
+ self.__load_index()
+
+ try:
+ for to_delete in delete_list:
+ if self.__storage.has_key(to_delete):
+ del self.__storage[to_delete]
+ except Exception,e:
+ print e
+ self.__storage = None
+ return False
+
+ self.__close_storage()
+ return True
+
+ def __close_storage(self):
+ """
+ Close all the stuff
+ """
+ if not self.__storage:
+ return False
+
+ self.__storage.close()
+ fcntl.flock(self.__handle.fileno(), fcntl.LOCK_UN)
+ self.__storage = None
+ return True
+
+#we need some util methods
+def get_index_data(dir=None):
+ """
+ A simple getter method for above structure
+ """
+ db = IndexDb(dir)
+ result = db.read_from_index()
+ return result
+
+def write_index_data(data,dir=None):
+ """
+ A simple setter method for above structure
+ """
+ db = IndexDb(dir)
+ result = db.write_to_index(data)
+ return result
+
+def delete_index_data(data,dir=None):
+ """
+ A simple deletter method for above structure
+ """
+ db = IndexDb(dir)
+ result = db.delete_from_index(data)
+ return result
+
+def key_exists(key,dir=None):
+ """
+ Checks for a key if is there
+ """
+ dict = get_index_data(dir)
+ return dict.has_key(key)
+
+if __name__ == "__main__":
+ pass
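A short usage sketch for the helper functions at the bottom of this new module; the job ids are invented values:

    # Sketch: store, query and delete an overlord->minion job id mapping.
    from func import index_db

    index_db.write_index_data({"overlord-job-1234": [("minion-job-5678", "host1")]})
    print index_db.key_exists("overlord-job-1234")
    print index_db.get_index_data()
    index_db.delete_index_data(["overlord-job-1234"])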
Changed: func-0.28.tar.bz2/func/jobthing.py
@@ -34,10 +34,10 @@
JOB_ID_REMOTE_ERROR = 4
# how long to retain old job records in the job id database
-RETAIN_INTERVAL = 60 * 60
+RETAIN_INTERVAL = 60 * 60
# where to store the internal job id database
-CACHE_DIR = "/var/lib/func"
+CACHE_DIR = utils.getCacheDir()
def __update_status(jobid, status, results, clear=False):
return __access_status(jobid=jobid, status=status, results=results, write=True)
@@ -53,7 +53,7 @@
def __purge_old_jobs(storage):
"""
- Deletes jobs older than RETAIN_INTERVAL seconds.
+ Deletes jobs older than RETAIN_INTERVAL seconds.
MINOR FIXME: this probably should be a more intelligent algorithm that only
deletes jobs if the database is too big and then only the oldest jobs
but this will work just as well.
@@ -84,23 +84,23 @@
def __get_open_ids(storage):
"""
- That method is needes from other language/API/UI/GUI parts that uses
+ That method is needes from other language/API/UI/GUI parts that uses
func's async methods to know the status of the results.
"""
result_hash_pack = {}
#print storage
for job_id,result in storage.iteritems():
- #TOBE REMOVED that control is for old job_ids
+ #TOBE REMOVED that control is for old job_ids
#some users who will upgrade to new version will have errors
#if we dont have that control here :)
- if len(job_id.split("-"))>=4: #ignore the old job_ids the overlord part
+ if len(job_id.split("-"))>=4: #ignore the old job_ids the overlord part
result_hash_pack[job_id]=result[0]
elif len(job_id.split("-"))==2: #it seems to be a minion side id and also ignores old ids
result_hash_pack[job_id]=result[0]
return result_hash_pack
-
+
def __access_status(jobid=0, status=0, results=0, clear=False, write=False, purge=False,get_all=False):
@@ -111,14 +111,14 @@
except IOError:
raise Func_Client_Exception, 'Cannot create directory for status files. '+\
'Ensure you have permission to create %s directory' % dir
- filename = os.path.join(dir,"status-%s" % os.getuid())
+ filename = os.path.join(dir,"status-%s" % os.getuid())
try:
handle = open(filename,"w")
except IOError, e:
raise Func_Client_Exception, 'Cannot create status file. Ensure you have permission to write in %s directory' % dir
fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
- internal_db = dbm.open(filename, 'c', 0644 )
+ internal_db = dbm.open(filename, 'c', 0600 )
storage = shelve.Shelf(internal_db)
@@ -127,7 +127,7 @@
storage.close()
fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
return {}
-
+
if purge or write:
__purge_old_jobs(storage)
@@ -158,11 +158,11 @@
Given an array of items (pool), call callback in each one, but divide
the workload over nfork forks. Temporary files used during the
- operation will be created in cachedir and subsequently deleted.
+ operation will be created in cachedir and subsequently deleted.
"""
-
+
job_id = utils.get_formated_jobid(**extra_args)
-
+
__update_status(job_id, JOB_ID_RUNNING, -1)
pid = os.fork()
if pid != 0:
@@ -170,20 +170,19 @@
else:
# kick off the job
results = forkbomb.batch_run(pool, callback, nforks)
-
- # write job IDs to the state file on overlord
+
+ # write job IDs to the state file on overlord
__update_status(job_id, JOB_ID_PARTIAL, results)
# we now have a list of job id's for each minion, kill the task
os._exit(0)
-def minion_async_run(retriever, method, args):
+
+def minion_async_run(retriever, method, args,minion_query=None):
"""
This is a simpler invocation for minion side async usage.
"""
# to avoid confusion of job id's (we use the same job database)
- # minion jobs contain the string "minion".
-
-
+ # minion jobs contain the string "minion".
job_id = "%s-minion" % pprint.pformat(time.time())
__update_status(job_id, JOB_ID_RUNNING, -1)
pid = os.fork()
@@ -199,8 +198,39 @@
os._exit(0)
try:
- function_ref = retriever(method)
- rc = function_ref(*args)
+
+ fact_result = None
+ if args and type(args[0]) == dict and args[0].has_key('__fact__'):
+ fact_result = minion_query.exec_query(args[0]['__fact__'],True)
+ else:
+ function_ref = retriever(method)
+ #here we will append the job_id at the end of the args list
+ #so we can pull it via some decorator and use it for other
+ #purposes like logginng and output tracking per method which
+ #will be useful for lots of applications ...
+ #if you are doing something useful with decorators per methods
+ #be aware of that please ...
+ args = list(args)
+ args.append({'__logger__':True,'job_id':job_id})
+ args = tuple(args)
+ rc = function_ref(*args)
+
+ if fact_result and fact_result[0]: #that means we have True from query so can go on
+ function_ref = retriever(method)
+ #here we will append the job_id at the end of the args list
+ #so we can pull it via some decorator and use it for other
+ #purposes like logginng and output tracking per method which
+ #will be useful for lots of applications ...
+ #if you are doing something useful with decorators per methods
+ #be aware of that please ...
+ args = list(args)
+ args.append({'__logger__':True,'job_id':job_id})
+ args = tuple(args)
+ rc = function_ref(*args[1:])
+ rc = [{'__fact__':fact_result},rc]
+ elif fact_result and not fact_result[0]:
+ rc = [{'__fact__':fact_result}]
+
except Exception, e:
(t, v, tb) = sys.exc_info()
rc = cm_utils.nice_exception(t,v,tb)
@@ -208,13 +238,20 @@
__update_status(job_id, JOB_ID_FINISHED, rc)
os._exit(0)
-def job_status(jobid, client_class=None):
-
+
+#import for matching minion job ids with -- overlord job_ids
+from func.index_db import write_index_data
+from func.index_db import key_exists
+
+def job_status(jobid, client_class=None, client_class_config=None):
+
# NOTE: client_class is here to get around some evil circular reference
# type stuff. This is intended to be called by minions (who can leave it None)
# or by the Client module code (which does not need to be worried about it). API
# users should not be calling jobthing.py methods directly.
-
+ # NOTE: class_config is here so we can pass in all of our settings from the
+ # parent - otherwise async jobs with timeouts go straight to hell.
+
got_status = __get_status(jobid)
# if the status comes back as JOB_ID_PARTIAL what we have is actually a hash
# of hostname/minion-jobid pairs. Instantiate a client handle for each and poll them
@@ -228,10 +265,25 @@
some_missing = False
+ match_dict = {}
for host in interim_results.keys():
minion_job = interim_results[host]
- client = client_class(host, noglobs=True, async=False)
+ #here we inject the minion_job id and overlord job_id
+ #in a file that they point to each other, the reason of
+ #doing that is because overlord looses minion job_id
+ #after getting result from minion, we dont want that
+ #because some applications may need log outputs of some
+ #finished minion methods. The logs of minion methods are kept
+ #in minon site as minion job_id named log files the only way
+ #to track them is having them in dicts {overlord_job_id : minion_job_id}
+ if match_dict.has_key(jobid):
+ match_dict[jobid].append((minion_job,host))
+ else:
+ match_dict={jobid:[]}
+ match_dict[jobid].append((minion_job,host))
+
+ client = client_class(host, noglobs=True, async=False, config=client_class_config)
minion_result = client.jobs.job_status(minion_job)
if type(minion_result) != list or len(minion_result)!=2:
@@ -245,12 +297,15 @@
partial_results[host] = [ utils.REMOTE_ERROR, "lost job" ]
else:
partial_results[host] = minion_interim_result
- else:
+ else:
some_missing = True
+ #write the match dictionary for {overlord_job_id:minion_job_id}
+ write_index_data(match_dict)
+
if some_missing or not interim_results:
return (JOB_ID_PARTIAL, partial_results)
-
+
else:
# Save partial results in state file so next time we don't
# call minions to get status.
@@ -260,10 +315,8 @@
else:
return got_status
-
+
# of job id's on the minion in results.
if __name__ == "__main__":
__test()
-
-
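In overlord terms, the async flow that drives this code looks roughly like the following; the glob, the command and the one-second poll interval are illustrative:

    # Sketch: start an async job, then poll job_status() until it finishes.
    import time
    import func.overlord.client as fc
    from func import jobthing

    client = fc.Client("*", async=True)
    job_id = client.command.run("uptime")    # async call returns a job id, not results

    while True:
        code, results = client.job_status(job_id)
        if code not in (jobthing.JOB_ID_RUNNING, jobthing.JOB_ID_PARTIAL):
            break
        time.sleep(1)
    print results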
Changed: func-0.28.tar.bz2/func/logger.py
@@ -12,8 +12,9 @@
##
##
-
import logging
+import os
+
from certmaster.config import read_config
from func.commonconfig import FuncdConfig
@@ -33,16 +34,24 @@
def __init__(self, logfilepath ="/var/log/func/func.log"):
config_file = '/etc/func/minion.conf'
- self.config = read_config(config_file, FuncdConfig)
+ self.config = read_config(config_file, FuncdConfig)
self.loglevel = logging._levelNames[self.config.log_level]
self._setup_logging()
if self._no_handlers:
self._setup_handlers(logfilepath=logfilepath)
-
+
def _setup_logging(self):
self.logger = logging.getLogger("svc")
def _setup_handlers(self, logfilepath="/var/log/func/func.log"):
+
+ # we try to log module loading and whatnot, even if we aren't
+ # root, so if we can't write to the log file, ignore it
+ # this lets "--help" work as a user
+ # https://fedorahosted.org/func/ticket/75
+ if not os.access(logfilepath, os.W_OK):
+ return
+
handler = logging.FileHandler(logfilepath, "a")
self.logger.setLevel(self.loglevel)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
@@ -74,3 +83,97 @@
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self._no_handlers = False
+
+#some more dynamic logger handlers here
+config_file = '/etc/func/minion.conf'
+config = read_config(config_file, FuncdConfig)
+GLOBAL_LOG_DIR = config.method_log_dir
+class StandartLogger(object):
+ """
+ It is just a proxy like object to the logging
+ module so we control here the stuff
+ """
+
+ def __init__(self,handlers,app_name,**kwargs):
+ self.logger = logging.getLogger(app_name)
+ self.logger.setLevel(logging.DEBUG)
+
+ self.handlers = handlers
+ self.__setup_handlers()
+
+ def __setup_handlers(self):
+ # a default case is to have a FileHandler for all that is what we want
+ for handler in self.handlers:
+ self.logger.addHandler(handler)
+
+ def progress(self,current,all):
+ """
+ A method to log the progress of the
+ running method ...
+ """
+ self.logger.debug("Progress report %d/%d completed"%(current,all))
+
+ def debug(self,msg):
+ self.logger.debug(msg)
+ def info(self,msg):
+ self.logger.info(msg)
+ def critical(self,msg):
+ self.logger.critical(msg)
+ def error(self,msg):
+ self.logger.error(msg)
+ def exception(self,msg):
+ self.logger.exception(msg)
+ def warn(self,msg):
+ self.logger.warn(msg)
+
+#----------------------------------HANDLERS------------------------------------------------
+class AbstarctHandler(object):
+ pass
+
+class StandartHandler(AbstarctHandler):
+ """
+ Standart one just has a filehandler in it
+ """
+ def __init__(self,formatter,**kwargs):
+ if kwargs.has_key('log_place'):
+ self.log_place = "".join([kwargs['log_place']])
+ log_f = os.path.join(GLOBAL_LOG_DIR,self.log_place)
+ if not os.path.exists(os.path.split(log_f)[0]):
+ os.mkdir(os.path.split(log_f)[0])
+
+ self.handler = logging.FileHandler((log_f), "a")
+ self.handler.setFormatter(formatter)
+
+ def __getattr__(self,name):
+ return getattr(self.handler,name)
+
+#--- some formatters here ---
+def standart_formatter():
+ return logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+
+def exception_formatter():
+ return logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(message)s")
+
+#----------------------------------HANDLERS------------------------------------------------
+STANDART_LOGGER = 0
+EXCEPTION_LOGGER = 1
+
+class LogFactory(object):
+
+ def get_instance(type=STANDART_LOGGER,app_name="direct_log",log_place=None):
+ if type == STANDART_LOGGER:
+ if not log_place:
+ log_place = "".join([app_name.strip()])
+ sh = StandartHandler(standart_formatter(),log_place=log_place)
+ logger = StandartLogger([sh.handler],app_name=app_name)
+ return logger
+ elif type == EXCEPTION_LOGGER:
+ #we will add the prefixes here ok
+ if not log_place:
+ log_place = "".join([app_name.strip()])
+ sh = StandartHandler(exception_formatter(),log_place=log_place)
+ logger = StandartLogger([sh],app_name=app_name)
+ return logger
+ else:
+ return None
+ get_instance = staticmethod(get_instance)
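A sketch of how a minion module might obtain one of these loggers through the new LogFactory; the application name and messages are placeholders:

    # Sketch: per-method logging under method_log_dir via LogFactory.
    from func.logger import LogFactory, EXCEPTION_LOGGER

    log = LogFactory.get_instance(app_name="hardware.info")
    log.info("starting inventory")
    log.progress(3, 10)     # logs "Progress report 3/10 completed"

    err_log = LogFactory.get_instance(type=EXCEPTION_LOGGER, app_name="hardware.info")
    err_log.error("something went wrong")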
Changed: func-0.28.tar.bz2/func/minion/AuthedXMLRPCServer.py
@@ -70,7 +70,10 @@
def __init__(self, address, pkey, cert, ca_cert, authinfo_callback=None, timeout=None):
BaseAuthedXMLRPCServer.__init__(self, address, authinfo_callback)
- SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler)
+ if sys.version_info[0] <= 2 and sys.version_info[1] <= 4:
+ SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler)
+ else:
+ SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler, allow_none=True)
SSLCommon.BaseSSLServer.__init__(self, address, AuthedSimpleXMLRPCRequestHandler, pkey, cert, ca_cert, timeout=timeout)
Changed: func-0.28.tar.bz2/func/minion/acls.py
@@ -14,7 +14,8 @@
import glob
import os
import sys
-
+import time
+import stat
from func import logger
@@ -23,36 +24,60 @@
class Acls(object):
def __init__(self, config=None):
self.config = config
-
+
self.acldir = self.config.acl_dir
- self.acls = {}
+ self._acl_glob = '%s/*.acl' % self.acldir
+ self._acls = {}
self.logger = logger.Logger().logger
self.certmaster_overrides_acls = self.config.certmaster_overrides_acls
+ self.last_load_time = 0
self.load()
- def load(self):
+ def _reload_acls(self):
+ """
+ return True if most recent timestamp of any of the acl files in the acl
+ dir is more recent than the last_load_time
+ """
+
+ # if we removed or added a file - this will trigger a reload
+ if os.stat(self.acldir)[stat.ST_MTIME] > self.last_load_time:
+ return True
+
+ # if we modified or added a file - this will trigger a reload
+ for fn in glob.glob(self._acl_glob):
+ if os.stat(fn)[stat.ST_MTIME] > self.last_load_time:
+ return True
+
+ return False
+
+ def load(self):
"""
takes a dir of .acl files
returns a dict of hostname+hash = [methods, to, run]
-
+
"""
-
+
if not os.path.exists(self.acldir):
sys.stderr.write('acl dir does not exist: %s\n' % self.acldir)
- return self.acls
-
+ return self._acls
+
+ if not self._reload_acls():
+ return self._acls
+
+ self.logger.debug("acl [re]loading")
+ self._acls = {} # nuking from orbit - just in case
+
# get the set of files
- acl_glob = '%s/*.acl' % self.acldir
- files = glob.glob(acl_glob)
-
+ files = glob.glob(self._acl_glob)
+
for acl_file in files:
- self.logger.debug("acl_file", acl_file)
+ self.logger.debug("acl_file %s", acl_file)
try:
fo = open(acl_file, 'r')
except (IOError, OSError), e:
sys.stderr.write('cannot open acl config file: %s - %s\n' % (acl_file, e))
continue
-
+
for line in fo.readlines():
if line.startswith('#'): continue
if line.strip() == '': continue
@@ -62,30 +87,31 @@
methods = methods.strip()
methods = methods.replace(',',' ')
methods = methods.split()
- if not self.acls.has_key(host):
- self.acls[host] = []
- self.acls[host].extend(methods)
+ if not self._acls.has_key(host):
+ self._acls[host] = []
+ self._acls[host].extend(methods)
- self.logger.debug("acls %s" % self.acls)
+ self.logger.debug("acls %s" % self._acls)
- return self.acls
+ self.last_load_time = time.time()
+ return self._acls
+
+ acls = property(load)
def check(self, cm_cert, cert, ip, method, params):
# certmaster always gets to run things
# unless we are testing, and need to turn it off.. -al;
-
-
if self.config.certmaster_overrides_acls:
ca_cn = cm_cert.get_subject().CN
ca_hash = cm_cert.subject_name_hash()
ca_key = '%s-%s' % (ca_cn, ca_hash)
- self.acls[ca_key] = ['*']
+ self._acls[ca_key] = ['*', 'foo']
cn = cert.get_subject().CN
sub_hash = cert.subject_name_hash()
self.logger.debug("cn: %s sub_hash: %s" % (cn, sub_hash))
- self.logger.debug("acls %s" % self.acls)
+ self.logger.debug("current acls %s" % self.acls)
if self.acls:
allow_list = []
hostkey = '%s-%s' % (cn, sub_hash)
@@ -98,7 +124,7 @@
for methodmatch in allow_list:
if fnmatch.fnmatch(method, methodmatch):
return True
-
+
return False
def save(self):
@@ -112,5 +138,3 @@
def update(self, acl, host):
pass
-
-
|
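The interesting part of this acls.py change is the mtime-based cache invalidation: the acl dict is only rebuilt when the acl directory itself or one of the *.acl files is newer than the last load. A standalone sketch of that pattern (the directory layout and the toy "host : methods" parser are illustrative and not necessarily func's exact acl format):

    import glob
    import os
    import stat
    import time

    class CachedAclDir(object):
        def __init__(self, acldir):
            # the sketch assumes acldir already exists
            self.acldir = acldir
            self.acl_glob = os.path.join(acldir, "*.acl")
            self.last_load_time = 0
            self.acls = {}

        def _needs_reload(self):
            # adding or removing a file bumps the directory mtime ...
            if os.stat(self.acldir)[stat.ST_MTIME] > self.last_load_time:
                return True
            # ... and editing a file bumps that file's own mtime
            for fn in glob.glob(self.acl_glob):
                if os.stat(fn)[stat.ST_MTIME] > self.last_load_time:
                    return True
            return False

        def load(self):
            if not self._needs_reload():
                return self.acls
            self.acls = {}
            for fn in glob.glob(self.acl_glob):
                for line in open(fn).readlines():
                    line = line.strip()
                    if not line or line.startswith("#"):
                        continue
                    host, methods = line.split(":", 1)
                    self.acls.setdefault(host.strip(), []).extend(methods.replace(",", " ").split())
            self.last_load_time = time.time()
            return self.acls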
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/codes.py
^
|
@@ -25,5 +25,9 @@
class AccessToMethodDenied(FuncException):
pass
-
+
+
+class ModuleNotFoundException(FuncException):
+ pass
+
# FIXME: more sub-exceptions maybe
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts
^
|
+(directory)
|
|
Changed |
func-0.28.tar.bz2/func/minion/facts/__init__.py
^
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/minion_query.py
^
|
@@ -0,0 +1,422 @@
+from func.minion.facts.query import FuncLogicQuery
+from func.minion.facts.query_utils import Q
+
+class FactsMinion(object):
+ """
+ This class is responsible for:
+ 1. De-serialization of the query by converting
+ it to a Q structure.
+
+ 2. Calling the (prewritten) 'facts' modules
+ """
+ VALID_QUERY_KEYS = ["AND","OR","NOT"]
+
+ def __init__(self,fact_query=None,method_fact_list=None):
+ """
+ Constructor
+
+ @type fact_query : FuncLogicQuery
+ @param fact_query : FuncLogicQuery that will be produced by this class if
+ not supplied to the constructor, as it is here.
+ @type method_fact_list : list
+ @param method_fact_list : List of prewritten 'facts' modules; you can
+ find them under the facts/modules directory
+
+ @return : None
+ """
+ self.fact_query = fact_query
+ self.keyword_query = QueryKeyword()
+ self.method_fact_list = method_fact_list
+
+ def deserialize(self,q_list):
+ """
+ Gets the list that is sent from the overlord (a plain string or list)
+ and converts it into a FuncLogicQuery so facts can be pulled from the
+ prewritten modules...
+
+ @type q_list : list
+ @param q_list : Query list
+ """
+ q_result = self.__traverse_deserialize(q_list)
+ if not self.fact_query:
+ self.fact_query = FuncLogicQuery(q_result,self.pull_facts)
+ else:
+ if self.fact_query.q.connector == "OR":
+ self.fact_query = self.fact_query | FuncLogicQuery(q_result,self.pull_facts)
+ else:
+ self.fact_query = self.fact_query & FuncLogicQuery(q_result,self.pull_facts)
+
+ return q_result
+
+ def exec_query(self,q_list,include_results=False):
+ """
+ The method which gives the final result of the
+ query, with fact values included if requested; that
+ is what include_results is for.
+
+ @type q_list : list
+ @param q_list : Query list
+ @type include_results : boolean
+ @param include_results : The main purpose of facts is to
+ return True or False, but sometimes we
+ need the real fact values included as
+ well.
+
+
+ """
+ self.deserialize(q_list)
+ if not include_results:
+ return self.fact_query.result
+ else:
+ return self.fact_query.exec_query_with_facts()
+
+ def pull_facts(self,overlord_tuple):
+ """
+ pull_facts is a callback method which is
+ called by FuncLogicQuery while it does the logic
+ query operation in its recursive traversal.
+ That way we don't put all the pulling operations in
+ FuncLogicQuery but keep it pluggable by calling from
+ outside. This is the place where we actually call the 'fact'
+ modules. If you need to change how fact methods are called,
+ this is the place to go.
+
+ @type overlord_tuple : Tuple
+ @param overlord_tuple : A tuple in format of (factname__factoperation,compared_value)
+ """
+ keyword_tuple = overlord_tuple[0].split("__")
+ overlord_value = overlord_tuple[1]
+
+ if len(keyword_tuple) > 1:
+ keyword = keyword_tuple[1]
+ else:
+ keyword = ""
+ fact_name = keyword_tuple[0]
+
+ if not self.method_fact_list.has_key(fact_name):
+ raise MinionQueryError("None existing Fact method or tag required %s "%fact_name)
+
+ fact_value = self.method_fact_list[fact_name]()
+
+ #we have the result with fact now it is time to resolve it
+ logic_fact = self.keyword_query.resolve(keyword,overlord_value,fact_value)
+
+ #the return value is a tuple: (logic value, True or False, and a {fact_name: fact_value} dictionary)
+ return (logic_fact,{fact_name:fact_value})
+
+ def __traverse_deserialize(self,traverse_object):
+ """
+ The private recursive part that traverses
+ the incoming overlord list (with facts included)
+ and produces a Q query object from it.
+ **(heavy recursive code) Caution: any change here
+ can make facts deserialize incorrectly, which
+ would break the facts API.
+
+ @type : list
+ @param : Incoming facts query list from the overlord,
+ something like [NOT,[AND,[a,TRUE,b,FALSE]]]
+
+ @return : Q object returned
+ """
+
+ q_object = None
+ #lets try divide and conquer :)
+ #assume that it is [NOT,[AND,[a,TRUE,b,FALSE]]]
+
+ #print "The traverse object at start is ",traverse_object
+ tmp_negated = False
+ tmp_connector ="AND"
+ if type(traverse_object[0]) == str and traverse_object[0] == "NOT":
+ tmp_negated = True
+ #q_object.negated = ~q_object
+ traverse_object = traverse_object[1:][0]
+ #print "After NOT the traverse_object is ",traverse_object
+ #raw_input()
+ if type(traverse_object[0]) == str and traverse_object[0] in ["AND","OR"]:
+ #q_object.connector = traverse_object[0]
+ tmp_connector = traverse_object[0]
+ traverse_object = traverse_object[1:][0]
+ #print "After CONNETOR the traverse_object is ",traverse_object
+ #raw_input()
+
+ if type(traverse_object[0])==str and not traverse_object[0] in self.VALID_QUERY_KEYS:
+ #print "In children : ",traverse_object
+ for ch in xrange(0,len(traverse_object),2):
+ #q_object.add(Q(tuple(traverse_object[ch:ch+2])),q_object.connector)
+ #print "We work on ",traverse_object[ch:ch+2]
+ if not q_object:
+ q_object = Q(tuple(traverse_object[ch:ch+2]))
+ q_object.connector = tmp_connector
+ else:
+ if q_object.connector == "OR":
+ q_object = q_object | Q(tuple(traverse_object[ch:ch+2]))
+ else:
+ q_object = q_object & Q(tuple(traverse_object[ch:ch+2]))
+ if tmp_negated:
+ q_object = ~q_object
+
+
+ #print "IN children Q object is ",q_object
+ traverse_object = []
+ #print "After CHILDREN the traverse_object is ",traverse_object
+ #raw_input()
+
+ if traverse_object:
+
+ #print "The traverse object at end is ",traverse_object
+ #raw_input()
+ for t_o in traverse_object:
+ #print "The traverse object at end is ",t_o
+ #raw_input()
+
+ tmp_q = self.__traverse_deserialize(t_o)
+ #print "I ADD THAT TO THE ALL ",tmp_q
+ #print "WILL BE ADDED TO ",q_object
+ if not q_object:
+ q_object = Q()
+ q_object.connector = tmp_connector
+ #q_object.add(tmp_q,q_object.connector)
+ if tmp_connector== "OR":
+ q_object = q_object | tmp_q
+ else:
+ q_object = q_object & tmp_q
+ #print "AFTER ADDITION ",q_object
+ if tmp_negated:
+ q_object = ~q_object
+
+ return q_object
+
+
+
+FACTS_MODULES = "func/minion/facts/modules/"
+from func.module_loader import load_modules
+from func.minion.facts.modules import fact_module
+def load_facts_modules():
+ """
+ loads the facts modules the same way we do with
+ minion modules; keeps the references globally
+ """
+ return load_modules(path=FACTS_MODULES,main_class=fact_module.BaseFactModule)
+
+def load_fact_methods(abort_on_conflict = False):
+ """
+ Loads the fact methods
+ """
+ fact_methods = {}
+ loaded_modules = load_facts_modules()
+ for module_name,module in loaded_modules.iteritems():
+ res = module.register_facts(fact_methods,module_name,abort_on_conflict)
+ # a control if we have some conflict with loading facts
+ if res:
+ return {'__conflict__':res}
+
+ #get the fact methods
+ return fact_methods
+
+
+class QueryKeyword(object):
+ """
+ This class resolves incoming keywords and
+ does the comparison work (the core part of the facts).
+
+ Ex: query_ob.filter(uname__contains="f9") - the 'contains'
+ keyword will be recognized by this class and it will return True if
+ the fact contains that word. If people need to add more
+ keywords for FuncQuery, this is the right place to do it.
+ """
+
+ def __init__(self):
+ pass
+
+ def resolve(self,keyword,overlord_value,fact_value):
+ """
+ This method is what will be called from outside
+ and gives back the True/False value that is needed.
+
+ @type keyword : string
+ @param keyword : The name of the method that will be called
+ Ex: contains,icontains ...
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if not hasattr(self,"keyword_%s"%keyword):
+ raise NonExistingQueryKeyword("The keyword %s used in query is not a valid one"%keyword)
+ return getattr(self,"keyword_%s"%keyword)(self.__convert_input(overlord_value,fact_value),fact_value)
+
+ def __convert_input(self,overlord_value,fact_value):
+ """
+ If the overlord value that comes from the client is
+ not the same type as the fact we should do some conversion.
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if type(overlord_value) != type(fact_value):
+ fact_type = type(fact_value)
+ return fact_type(overlord_value)
+ else:
+ return overlord_value
+
+ def keyword_contains(self,overlord_value,fact_value):
+ """
+ A simple method for contains, which checks if the
+ fact_value contains the overlord_value
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+
+ res = fact_value.find(overlord_value)
+ if res == -1:
+ return False
+ else:
+ return True
+
+ def keyword_icontains(self,overlord_value,fact_value):
+ """
+ A simple method for contains, which checks if the
+ fact_value contains the overlord_value (case insensitive)
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ res = fact_value.lower().find(overlord_value.lower())
+ if res == -1:
+ return False
+ else:
+ return True
+
+ def keyword_iexact(self,overlord_value,fact_value):
+ """
+ Looks for an iexact match
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if overlord_value.lower() == fact_value.lower():
+ return True
+ else:
+ return False
+
+ def keyword_startswith(self,overlord_value,fact_value):
+ """
+ A typical python starts with keyword implementation
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if fact_value.startswith(overlord_value):
+ return True
+ else:
+ return False
+
+ def keyword_gt(self,overlord_value,fact_value):
+ """
+ A greater-than (gt) keyword
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if overlord_value < fact_value:
+ return True
+ else:
+ return False
+
+
+ def keyword_gte(self,overlord_value,fact_value):
+ """
+ A greater-than-or-equal (gte) keyword
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if overlord_value <= fact_value:
+ #print "Comparing %s -- %s "%(overlord_value,fact_value)
+ return True
+ else:
+ return False
+
+ def keyword_lt(self,overlord_value,fact_value):
+ """
+ A less-than (lt) keyword
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if overlord_value > fact_value:
+ return True
+ else:
+ return False
+
+ def keyword_lte(self,overlord_value,fact_value):
+ """
+ A less-than-or-equal (lte) keyword
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if overlord_value >= fact_value:
+ return True
+ else:
+ return False
+
+ def keyword_(self,overlord_value,fact_value):
+ """
+ An == (equality) keyword, the default behaviour
+
+ @type overlord_value : string
+ @param overlord_value : Users value that he compares with system
+
+ @type fact_value : string
+ @param fact_value : The system fact value
+ """
+ if overlord_value == fact_value:
+ return True
+ else:
+ return False
+
+
+
+class NonExistingQueryKeyword(Exception):
+ """
+ Raised when the user calls a non-existing fact keyword
+ """
+ pass
+
+class MinionQueryError(Exception):
+ """
+ When we have some Minion Query Error :)
+ """
+ pass
|
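The keyword_* methods above are dispatched by name from QueryKeyword.resolve(), and __convert_input() coerces the overlord-supplied value to the fact's type before comparing. A hedged usage sketch, assuming the patched func package is importable (the fact values are made up):

    from func.minion.facts.minion_query import QueryKeyword

    qk = QueryKeyword()
    # substring match on a string fact
    print qk.resolve("contains", "fc9", "2.6.27.15-78.2.23.fc9")    # True
    # "20" is coerced to int before the gte comparison runs
    print qk.resolve("gte", "20", 42)                               # True
    # the empty keyword is the default equality check
    print qk.resolve("", "5", "5")                                  # True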
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/modules
^
|
+(directory)
|
|
Changed |
func-0.28.tar.bz2/func/minion/facts/modules/__init__.py
^
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/modules/fact_module.py
^
|
@@ -0,0 +1,59 @@
+from func import logger
+from func.utils import is_public_valid_method
+
+class BaseFactModule(object):
+ """
+ The base fact module, which is clever
+ enough to register the facts. It is similar
+ to FuncModule but registers facts with a
+ different convention and style. Look
+ into the other modules to get the idea.
+ """
+ version = "0.0.0"
+ description = "Base module of all facts"
+
+
+
+ def __init__(self):
+ self.__init_log()
+
+ def __init_log(self):
+ log = logger.Logger()
+ self.logger = log.logger
+
+ def register_facts(self,fact_callers,module_name,abort_on_conflict=False):
+ # a dictionary to catch the conflicts
+ """
+ Be careful not to override this method in your classes!
+
+ @param abort_on_conflict : Fact methods use a system called global tagging,
+ so it is easy to get conflicts (two facts
+ with the same name); when this is True it will
+ tell the user that something is wrong. There is
+ no need to worry: the unittests will catch it if
+ something bad happens ...
+ """
+ conflicts = {}
+ for attr in dir(self):
+ if self.__is_public_valid_method(attr):
+ fact_method = getattr(self, attr)
+ fact_callers["%s.%s"%(module_name,attr)] = fact_method
+ if hasattr(fact_method,"tag"):
+ method_tag = getattr(fact_method,"tag")
+ if fact_callers.has_key(method_tag):
+ self.logger.warning("Facts has registered the tag : %s before, it was overriden"%method_tag)
+ if abort_on_conflict:
+ if not conflicts.has_key(method_tag):
+ conflicts[method_tag] = []
+ conflicts[method_tag].append(getattr(fact_method,"__name__","default"))
+ if getattr(fact_callers[method_tag],"__name__","default") not in conflicts[method_tag]:
+ conflicts[method_tag].append(getattr(fact_callers[method_tag],"__name__","default"))
+
+ fact_callers[method_tag] = fact_method
+
+ #if there is conflict show it
+ if abort_on_conflict:
+ return conflicts
+
+ def __is_public_valid_method(self,attr):
+ return is_public_valid_method(self, attr, blacklist=['register_facts'])
|
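register_facts() publishes every public method of a fact module twice: once as "modulename.methodname" and, when the method carries a tag attribute, once under that bare tag. A minimal, hypothetical fact module following that convention could look like this (the module and fact names are invented; real modules live under func/minion/facts/modules/):

    import fact_module

    class ExampleFacts(fact_module.BaseFactModule):
        version = "0.0.1"
        description = "An invented example fact module"

        def distro_name(self):
            """Returns a made-up distribution name."""
            return "ExampleOS"

        # global tag alias; tags must stay unique across all fact modules
        distro_name.tag = "distro"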
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/modules/hardware.py
^
|
@@ -0,0 +1,69 @@
+import fact_module
+import sys
+import traceback
+
+class HardwareFacts(fact_module.BaseFactModule):
+ """
+ Gives some basic info about the hardware
+ """
+ version = "0.0.1"
+ description = "A modules that supplies hardware facts"
+
+ def __init__(self):
+ super(HardwareFacts,self).__init__()
+ try:
+ sys.path.append("/usr/share/smolt/client")
+ import smolt
+ except ImportError, e:
+ errmsg = "Import error while loading smolt for the hardware facts module. Smolt is probably not installed. This module is useless without it."
+ self.logger.warning(errmsg)
+ self.logger.warning("%s" % traceback.format_exc())
+ # hmm, what to return...
+ return
+
+ hardware = smolt.Hardware()
+ self.host = hardware.host
+
+ def run_level(self):
+ """
+ The runlevel of the system
+ """
+ return str(self.host.defaultRunlevel)
+
+ #for easier access; be careful, tags should be unique
+ run_level.tag = "runlevel"
+
+ def os_name(self):
+ """
+ Gives back the os name of the system
+ """
+ return str(self.host.os)
+
+ #for easier access; be careful, tags should be unique
+ os_name.tag = "os"
+
+
+ def cpu_vendor(self):
+ """
+ The CPU vendor
+ """
+ return str(self.host.cpuVendor)
+
+ cpu_vendor.tag = "cpuvendor"
+
+
+ def cpu_model(self):
+ """
+ Cpu model
+ """
+ return str(self.host.cpuModel)
+
+ cpu_model.tag = "cpumodel"
+
+ def kernel_version(self):
+ """
+ Kernel version
+ """
+ return str(self.host.kernelVersion)
+
+ kernel_version.tag = "kernel"
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/overlord_query.py
^
|
@@ -0,0 +1,99 @@
+#This module contains the parts that
+#hide (proxy) the overlord and minion work
+
+from func.minion.facts.query import FuncLogicQuery
+
+def display_active_facts(result,with_facts=False):
+ """
+ When we have got all of the results from the minions we may need
+ to display only the parts that match the facts query.
+
+ @type with_facts : boolean
+ @param with_facts : If you want to see the incoming fact values
+ this should be True, but it probably only makes
+ sense for the Python API.
+ """
+
+ if type(result) != dict:
+ return result
+
+ final_display = {}
+ for minion_name,minion_result in result.iteritems():
+
+ #CAUTION ugly if statements around :)
+ if type(minion_result) == list and len(minion_result) > 0 and type(minion_result[0]) == dict and minion_result[0].has_key('__fact__') :
+ if minion_result[0]['__fact__'][0] == True:
+ if with_facts:
+ final_display[minion_name] = minion_result
+ else:
+ final_display[minion_name] = minion_result[1:][0]
+ else:
+ return result
+ return final_display
+
+class OverlordQuery(object):
+ """
+ This is the overlord part of the facts query,
+ which will be included in the Overlord class. Its
+ most important duty is to convert FuncLogicQuery
+ objects to lists so they can be transferred over the wire :)
+ """
+ def __init__(self,*args,**kwargs):
+ """
+ An object responsible only for keeping
+ overlord queries and doing some serialization
+ work if needed ...
+ """
+ #some initialization stuff here ...
+ fact_query = None
+ if kwargs.has_key('fact_query'):
+ fact_query = kwargs['fact_query']
+ self.fact_query = fact_query or FuncLogicQuery()
+
+ #print "These are : ",self.overlord
+ #print "These are : ",self.fact_query
+
+ def serialize_query(self):
+ """
+ That part hides the complexity of internal data
+ in self.fact_query and passes it over the silent
+ network wire :)
+ """
+ return [self.fact_query.connector,self.__recurse_traverser(self.fact_query.q)]
+
+ def __recurse_traverser(self,q_object):
+ """
+ Recursively traverse the Q object and return
+ back a list-like structure which is ready to be
+ sent ...
+
+ @type q_object : FuncLogicQuery
+ @param q_object : FuncLogicQuery
+
+ @return : list of fact logic
+ """
+ results=[]
+ for n in q_object.children:
+ if not type(n) == tuple and not type(n) == list:
+ if n.negated:
+ results.append(["NOT",[n.connector,self.__recurse_traverser(n)]])
+ else:
+ results.append([n.connector,self.__recurse_traverser(n)])
+ else:
+ #here you will do some work
+ for ch in xrange(0,len(n),2):
+ results.append(n[ch:ch+2])
+
+ return results
+
+ def display_active(self,result,with_facts=False):
+ """
+ Get active ones only
+
+ @type with_facts : boolean
+ @param with_facts : If you want to see the incoming fact values
+ this should be True, but it probably only makes
+ sense for the Python API.
+ """
+
+ return display_active_facts(result,with_facts)
|
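serialize_query() flattens the Q tree into nested lists of connectors and (fact__keyword, value) tuples so the query can cross the XML-RPC wire. A hedged sketch of what that looks like, assuming the patched func package is importable (the exact nesting and ordering can vary):

    from func.minion.facts.overlord_query import OverlordQuery
    from func.minion.facts.query import FuncLogicQuery

    fq = FuncLogicQuery().filter(os__contains="Fedora").filter(runlevel="5")
    oq = OverlordQuery(fact_query=fq)
    # roughly: ['AND', [('os__contains', 'Fedora'), ('runlevel', '5')]]
    print oq.serialize_query()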
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/query.py
^
|
@@ -0,0 +1,277 @@
+from func.minion.facts.query_utils import Q
+from copy import deepcopy
+
+class BaseFuncQuery(object):
+ """
+ An object that you can pass Q objects to, to make it fetch
+ some results about some variables. FuncQuery is a bit like an
+ ORM, but not exactly. exec_query returns True or False;
+ that is what it is all about. For example, passing variables
+ to FuncQuery like temperature=21,uname="2.6.27.15" will
+ check on the target machine whether temperature is 21 and uname
+ is the expected value; if both are true the result will be
+ True and the rest of the requested methods will be queried.
+ """
+
+ def __init__(self,q_object=None,pull_result=None):
+ self.q = q_object
+ #pull result variable is kind of important
+ #it can be an object or a method which will
+ #return back a True or False statement
+ self.pull_result = pull_result
+
+ def __getattribute__(self,name):
+ """
+ Makes this act as a kind of proxy to the Q object
+
+ @type name : Not specified
+ @param name: Attribute requested
+ """
+ try:
+ return object.__getattribute__(self, name)
+ except AttributeError,e:
+ return object.__getattribute__(self.q,name)
+
+ def _clone(self,klass=None,q_object=None,pull_result=None):
+ """
+ When calling filter and the other query methods
+ we always return an object; it is better
+ for it to be a fresh one ...
+
+ @type klass: Class
+ @param klass: The object itself to be cloned
+
+ @type q_object : Q
+ @param q_object : Q object to be copied
+
+ @type pull_result : Method
+ @param pull_result : Copy the reference that actually pulls
+ """
+
+ if klass is None:
+ klass = self.__class__
+ c = klass(q_object,pull_result)
+ return c
+
+ def exec_query(self):
+ """
+ The part that will say if it is True or it is False
+ """
+ raise Exception("Not implemted method you should subclass and override that method")
+
+ result = property(exec_query)
+
+
+ def __or__(self,other):
+ if not isinstance(other,BaseFuncQuery):
+ raise IncompatibleTypeOperation("You can not or an object which is not type of FuncQuery")
+ tmp_q = self.q | other.q
+ fresh_query = self._clone(q_object=tmp_q,pull_result=self.pull_result)
+ return fresh_query
+
+ def __and__(self,other):
+ if not isinstance(other,BaseFuncQuery):
+ raise IncompatibleTypeOperation("You can not or an object which is not type of FuncQuery")
+ tmp_q = self.q & other.q
+ fresh_query = self._clone(q_object=tmp_q,pull_result=self.pull_result)
+ return fresh_query
+
+ def __nonzero__(self):
+ return bool(self.q)
+
+ def __main_filter(self,outside_connector,inside_connector,*args,**kwargs):
+ """
+ Common OR and AND operation we do.
+
+ @param outside_connector : The connector between two chained query
+ @param inside_connector : The connector inside the query
+ """
+ temp_q = Q(*args,**kwargs)
+ if inside_connector == "OR":
+ #because the default is AND
+ temp_q.connector = inside_connector
+ if self.q:
+ current_q = deepcopy(self.q)
+ else:
+ current_q = None
+
+ if not current_q:
+ current_q = temp_q
+ else:
+ if outside_connector == "OR":
+ current_q = current_q | temp_q
+ else:
+ current_q = current_q & temp_q
+
+ fresh_query = self._clone(q_object=current_q,pull_result=self.pull_result)
+ return fresh_query
+
+
+
+ def filter(self,*args,**kwargs):
+ """
+ The filter method is the one that will be used most
+ of the time by the end user; it ANDs the args inside.
+ """
+ return self.__main_filter("AND","AND",*args,**kwargs)
+
+ def filter_or(self,*args,**kwargs):
+ """
+ Like filter, but ORs the args inside and ORs
+ the result with any existing query.
+ """
+ return self.__main_filter("OR","OR",*args,**kwargs)
+
+ def and_and(self,*args,**kwargs):
+ """
+ AND inside and connect with AND
+ """
+ return self.__main_filter("AND","AND",*args,**kwargs)
+
+ def and_or(self,*args,**kwargs):
+ """
+ OR inside and connect with AND
+ """
+ return self.__main_filter("AND","OR",*args,**kwargs)
+
+
+ def or_or(self,*args,**kwargs):
+ """
+ OR inside and connect with OR
+ """
+ return self.__main_filter("OR","OR",*args,**kwargs)
+
+ def or_and(self,*args,**kwargs):
+ """
+ AND inside and connect with OR
+ """
+ return self.__main_filter("OR","AND",*args,**kwargs)
+
+
+
+ def exclude(self,*args,**kwargs):
+ """
+ Useful when you want to ignore some of the things
+ in the query; exclude inverts (NOTs) the query. Not used
+ very much ...
+ """
+ temp_q = ~Q(*args,**kwargs)
+
+ if self.q:
+ current_q = deepcopy(self.q)
+ else:
+ current_q = None
+
+ if not self.q:
+ current_q = temp_q
+ else:
+ current_q.add(temp_q,"AND")
+ fresh_query = self._clone(q_object=current_q,pull_result=self.pull_result)
+ return fresh_query
+
+ def set_compexq(self,q_object,connector=None):
+ """
+ Sometimes we need more complex queries, ORed,
+ ANDed, etc.; this method is for that, and is intended
+ ONLY for API users ....
+ """
+ if not connector or not self.q:
+ current_q = deepcopy(q_object)
+ else:
+ current_q = deepcopy(self.q)
+ current_q.add(q_object,connector)
+ fresh_query = self._clone(q_object=current_q,pull_result=self.pull_result)
+ return fresh_query
+
+ def __str__(self):
+ return str(self.q)
+
+class FuncLogicQuery(BaseFuncQuery):
+ """
+ Will be used to decide if a method will be
+ invoked on minion side ...
+ """
+ def exec_query_with_facts(self):
+ """
+ Sometimes you may need to see facts as
+ values ...
+ """
+ return (self.exec_query(),self.fact_dict)
+
+ def exec_query(self):
+ """
+ The part that will say it is True or it is False
+ """
+ self.fact_dict = {}
+ if not self.q:
+ raise Exception("You should set up some query object before executing it")
+
+
+ return self.__main_traverse(self.q)
+ result = property(exec_query)
+
+ def __traverse_query(self,node):
+ """
+ A recursive method that will be responsible for traversing
+ complex Q object.
+
+ @param node : Q node
+ """
+ logic_results=[]
+ for n in node.children:
+ if not type(n) == tuple and not type(n) == list:
+ result = self.__traverse_query(n)
+ logic_results.append(self.logic_operation(n,result))
+ else:
+ #here you will do some work
+ if not self.pull_result:
+ logic_results.append(n[1])
+ else:
+ logic_pull = self.pull_result(n)
+ #append the result if True or False
+ #print "What i append for logic ? ",logic_pull[0]
+ logic_results.append(logic_pull[0])
+ #keep also the fact value user may want to see em
+ for fact_name,fact_value in logic_pull[1].iteritems():
+ self.fact_dict[fact_name] = fact_value
+ return logic_results
+
+ def __main_traverse(self,q_ob):
+ """
+ Collects the final stuff
+
+ @param q_ob : Q node
+ """
+ tmp_res = self.__traverse_query(q_ob)
+ return self.logic_operation(q_ob,tmp_res)
+
+ def logic_operation(self,node,logic_list):
+ """
+ Just computes the logic of current list
+
+ @return : True or False
+ """
+
+ tmp_res = None
+ for res in logic_list:
+ if tmp_res == None:
+ tmp_res = res
+ else:
+ if node.connector == "AND":
+ tmp_res = tmp_res & res
+ else:
+ tmp_res = tmp_res | res
+ if node.negated:
+ tmp_res = not tmp_res
+ return tmp_res
+
+class FuncDataQuery(BaseFuncQuery):
+ """
+ A class which is designed with the intention of being used
+ to query minion results; an idea which would be cool
+ but needs a separate branch :)
+ """
+ pass
+
+class IncompatibleTypeOperation(Exception):
+ pass
|
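FuncLogicQuery only decides True or False; the actual fact values come from the pull_result callback, which has to return a (bool, {fact_name: value}) tuple for every (fact__keyword, value) pair it is handed. A hedged sketch with a fake callback, assuming the patched func package is importable:

    from func.minion.facts.query import FuncLogicQuery

    def fake_pull(pair):
        # pair is a ("factname__keyword", compared_value) tuple;
        # pretend every fact matched and report a dummy value for it
        fact_name = pair[0].split("__")[0]
        return (True, {fact_name: "dummy-value"})

    q = FuncLogicQuery(pull_result=fake_pull)
    q = q.filter(os__contains="Fedora").filter_or(kernel__startswith="2.6")
    print q.result                    # True
    print q.exec_query_with_facts()   # (True, {'os': 'dummy-value', 'kernel': 'dummy-value'})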
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/query_utils.py
^
|
@@ -0,0 +1,40 @@
+"""
+Various data structures used in query construction.
+
+Factored out from django.db.models.query so that they can also be used by other
+modules without getting into circular import difficulties.
+"""
+
+from copy import deepcopy
+from func.minion.facts import tree
+
+class Q(tree.Node):
+ """
+ Encapsulates filters as objects that can then be combined logically (using
+ & and |).
+ """
+ # Connection types
+ AND = 'AND'
+ OR = 'OR'
+ default = AND
+
+ def __init__(self, *args, **kwargs):
+ super(Q, self).__init__(children=list(args) + kwargs.items())
+
+ def _combine(self, other, conn):
+ if not isinstance(other, Q):
+ raise TypeError(other)
+ obj = deepcopy(self)
+ obj.add(other, conn)
+ return obj
+
+ def __or__(self, other):
+ return self._combine(other, self.OR)
+
+ def __and__(self, other):
+ return self._combine(other, self.AND)
+
+ def __invert__(self):
+ obj = deepcopy(self)
+ obj.negate()
+ return obj
|
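Q objects are the same connector-tree building blocks Django uses: & and | combine them, ~ negates, and str() shows the resulting tree. A small sketch, assuming the package is importable (the printed form is approximate):

    from func.minion.facts.query_utils import Q

    a = Q(os__contains="Fedora")
    b = Q(runlevel="5")
    combined = a & ~b
    # prints something like:
    # (AND: ('os__contains', 'Fedora'), (NOT (AND: ('runlevel', '5'))))
    print combined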
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/facts/tree.py
^
|
@@ -0,0 +1,119 @@
+#taken from django source for constructing queries
+"""
+A class for storing a tree graph. Primarily used for filter constructs in the
+ORM.
+"""
+
+from copy import deepcopy
+
+class Node(object):
+ """
+ A single internal node in the tree graph. A Node should be viewed as a
+ connection (the root) with the children being either leaf nodes or other
+ Node instances.
+ """
+ # Standard connector type. Clients usually won't use this at all and
+ # subclasses will usually override the value.
+ default = 'DEFAULT'
+
+ def __init__(self, children=None, connector=None, negated=False):
+ """
+ Constructs a new Node. If no connector is given, the default will be
+ used.
+
+ Warning: You probably don't want to pass in the 'negated' parameter. It
+ is NOT the same as constructing a node and calling negate() on the
+ result.
+ """
+ self.children = children and children[:] or []
+ self.connector = connector or self.default
+ self.subtree_parents = []
+ self.negated = negated
+
+ # We need this because of django.db.models.query_utils.Q. Q. __init__() is
+ # problematic, but it is a natural Node subclass in all other respects.
+ def _new_instance(cls, children=None, connector=None, negated=False):
+ """
+ This is called to create a new instance of this class when we need new
+ Nodes (or subclasses) in the internal code in this class. Normally, it
+ just shadows __init__(). However, subclasses with an __init__ signature
+ that is not an extension of Node.__init__ might need to implement this
+ method to allow a Node to create a new instance of them (if they have
+ any extra setting up to do).
+ """
+ obj = Node(children, connector, negated)
+ obj.__class__ = cls
+ return obj
+ _new_instance = classmethod(_new_instance)
+
+ def __str__(self):
+ if self.negated:
+ return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
+ in self.children]))
+ return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
+ self.children]))
+
+ def __deepcopy__(self, memodict):
+ """
+ Utility method used by copy.deepcopy().
+ """
+ obj = Node(connector=self.connector, negated=self.negated)
+ obj.__class__ = self.__class__
+ obj.children = deepcopy(self.children, memodict)
+ obj.subtree_parents = deepcopy(self.subtree_parents, memodict)
+ return obj
+
+ def __len__(self):
+ """
+ The size of a node is the number of children it has.
+ """
+ return len(self.children)
+
+ def __nonzero__(self):
+ """
+ For truth value testing.
+ """
+ return bool(self.children)
+
+ def __contains__(self, other):
+ """
+ Returns True if 'other' is a direct child of this instance.
+ """
+ return other in self.children
+
+ def add(self, node, conn_type):
+ """
+ Adds a new node to the tree. If the conn_type is the same as the root's
+ current connector type, the node is added to the first level.
+ Otherwise, the whole tree is pushed down one level and a new root
+ connector is created, connecting the existing tree and the new node.
+ """
+ if node in self.children and conn_type == self.connector:
+ return
+ if len(self.children) < 2:
+ self.connector = conn_type
+ if self.connector == conn_type:
+ if isinstance(node, Node) and (node.connector == conn_type or
+ len(node) == 1):
+ self.children.extend(node.children)
+ else:
+ self.children.append(node)
+ else:
+ obj = self._new_instance(self.children, self.connector,
+ self.negated)
+ self.connector = conn_type
+ self.children = [obj, node]
+
+ def negate(self):
+ """
+ Negate the sense of the root connector. This reorganises the children
+ so that the current node has a single child: a negated node containing
+ all the previous children. This slightly odd construction makes adding
+ new children behave more intuitively.
+
+ Interpreting the meaning of this negate is up to client code. This
+ method is useful for implementing "not" arrangements.
+ """
+ self.children = [self._new_instance(self.children, self.connector,
+ not self.negated)]
+ self.connector = self.default
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/func_arg.py
^
|
@@ -11,7 +11,7 @@
##
class ArgCompatibility(object):
- """
+ """
That class is going to test if the module that was created by module
writer if he/she obeys to the rules we put here
"""
@@ -42,7 +42,7 @@
@param : get_args_result : The dict with all method related info
"""
self.__args_to_check = get_args_result
-
+
#what options does each of the basic_types have :
self.__valid_args={
'int':('range','min','max',),
@@ -58,7 +58,7 @@
def _is_type_options_compatible(self,argument_dict):
"""
Checks the method's argument_dict's options and looks inside
- self.__valid_args to see if the used option is there
+ self.__valid_args to see if the used option is there
@param : argument_dict : current argument to check
@return : True of raises IncompatibleTypesException
@@ -67,14 +67,14 @@
#did module writer add a key 'type'
if not argument_dict.has_key('type') or not self.__valid_args.has_key(argument_dict['type']):
raise IncompatibleTypesException("%s is not in valid options,possible ones are :%s"%(argument_dict['type'],str(self.__valid_args)))
-
+
#we need some specialization about if user has defined options
#there is no need for using validator,min_lenght,max_length
if argument_dict.has_key('options'):
for arg_option in argument_dict.keys():
if arg_option!='options' and arg_option in self.__valid_args['string']:
raise IncompatibleTypesException('The options keyword should be used alone in a string cant be used with min_length,max_length,validator together')
-
+
#if range keyword is used into a int argument the others shouldnt be there
if argument_dict.has_key('range'):
if len(argument_dict['range'])!=2:
@@ -85,14 +85,14 @@
for arg_option in argument_dict.keys():
if arg_option!='range' and arg_option in self.__valid_args['int']:
raise IncompatibleTypesException('The options range should be used alone into a int argument')
-
+
# we will use it everytime so not make lookups
the_type = argument_dict['type']
from itertools import chain #may i use chain ?
for key,value in argument_dict.iteritems():
-
+
if key == "type":
continue
if key not in chain(self.__valid_args[the_type],self.__common_options):
@@ -103,20 +103,20 @@
def _is_basic_types_compatible(self,type_dict):
"""
- Validates that if the types that were submitted with
+ Validates that if the types that were submitted with
get_method_args were compatible with our format above
in __basic_types
- @param : type_dict : The type to examine
+ @param : type_dict : The type to examine
@return : True or raise IncompatibleTypesException Exception
"""
#print "The structure we got is %s:"%(type_dict)
for key,value in type_dict.iteritems():
- #do we have that type
+ #do we have that type
if not self.__basic_types.has_key(key):
raise IncompatibleTypesException("%s not in the basic_types"%key)
-
+
#if type matches and dont match default
#print "The key: %s its value %s and type %s"%(key,value,type(value))
if key!='default' and type(value)!=type(self.__basic_types[key]):
@@ -128,15 +128,19 @@
"""
Method inspects the method arguments and checks if the user
has registered all the arguments succesfully and also adds a
- 'order' keyword to method arguments to
+ 'order' keyword to method arguments to
"""
import inspect
from itertools import chain
#get the arguments from real object we have [args],*arg,**kwarg,[defaults]
- tmp_arguments=inspect.getargspec(getattr(cls,method_name))
+ #tmp_arguments=inspect.getargspec(getattr(cls,method_name))
+
+ #overriden_args is something I created; it is a hack!
+ tmp_arguments = getattr(cls, method_name).overriden_args
check_args=[arg for arg in chain(tmp_arguments[0],tmp_arguments[1:3]) if arg and arg!='self']
- #print "The arguments taken from the inspect are :",check_args
- #the size may change of the hash so should a copy of it
+ #raise Exception("The check_args are like %s"%str(check_args))
+ #print "The arguments taken from the inspect are :",check_args
+ #the size may change of the hash so should a copy of it
copy_arguments = arguments.copy()
for compare_arg in copy_arguments.iterkeys():
if not compare_arg in check_args:
@@ -151,18 +155,18 @@
"""
Validates the output for minion module's
get_method_args method
-
+
The structure that is going to be validated is in that format :
-
+
{
method_name1 : {'args':{...},
'description':"wowo"},
method_name12 : {...}
}
-
+
@return : True or raise IncompatibleTypesException Exception
"""
-
+
for method in self.__args_to_check.iterkeys():
#here we got args or description part
#check if user did submit something not in the __method_options
@@ -186,7 +190,7 @@
class IncompatibleTypesException(Exception):
"""
- Raised when we assign some values that breaksour rules
+ Raised when we assign some values that breaksour rules
@see ArgCompatibility class for allowed situations
"""
def __init__(self, value=None):
@@ -221,6 +225,6 @@
class ArgumentRegistrationError(IncompatibleTypesException):
"""
When user forgets to register soem of the arguments in the list
- or adds some argument that is not there
+ or adds some argument that is not there
"""
pass
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/bridge.py
^
|
@@ -18,34 +18,45 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
+"""
+Func module for bridge management
+"""
+
+__author__ = "Jasper Capel <capel@stone-it.com>"
+__version__ = "0.0.3"
+__api_version__ = "0.0.2"
+
import func_module
import os, re
+from certmaster.config import BaseConfig, Option, ListOption
+
class Bridge(func_module.FuncModule):
- version = "0.0.2"
- api_version = "0.0.2"
+ version = __version__
+ api_version = __api_version__
description = "Func module for Bridge management"
- # A list of bridge names that should be ignored. You can use this if you
- # have bridges that should never be touched by func.
- # This should go the the module-specific configuration file in the future.
- # Will ignore virbr0 by default, as it's managed by libvirtd, it's probably
- # a bad idea to touch it.
- ignorebridges = [ "virbr0" ]
- brctl = "/usr/sbin/brctl"
- ip = "/sbin/ip"
- ifup = "/sbin/ifup"
- ifdown = "/sbin/ifdown"
+ class Config(BaseConfig):
+ ignorebridges = ListOption()
+ brctl = Option("/usr/sbin/brctl")
+ ip = Option("/sbin/ip")
+ ifup = Option("/sbin/ifup")
+ ifdown = Option("/sbin/ifdown")
def list(self, listvif=True):
- # Returns a dictionary. Elements look like this:
- # key: bridgename, value: [ interface1, interface2, ..., interfacen ]
- # If listvif is provided as a parameter and set to false, the xen-style
- # virtual interfaces (vifX.Y) will be omitted from the listing.
+ """
+ List bridges.
+
+ Returns a dictionary. Elements look like this:
+ key: bridgename, value: [ interface1, interface2, ..., interfaceN ]
+
+ Keyword arguments:
+ listvif -- Boolean: when False, xen-style virtual interfaces (vifX.Y) will be omitted from the listing
+ """
retlist = {}
- command = self.brctl + " show"
+ command = self.options.brctl + " show"
fp = os.popen(command)
@@ -69,7 +80,7 @@
if len(elements) > 1:
# Line containing a new bridge name + interface
curbr = elements[0]
- if not curbr in self.ignorebridges:
+ if not curbr in self.options.ignorebridges:
if len(elements) == 3:
# This is a bridge without connected devices
retlist[elements[0]] = [ ]
@@ -85,15 +96,21 @@
elif len(elements) == 1:
# Dictionary key containing interface name should already
# exist, append the interface.
- if not curbr in self.ignorebridges:
- if not vifpattern.match(elements[0]) and listvif == True:
- retlist[curbr].append(elements[0])
-
+ if not curbr in self.options.ignorebridges:
+ if vifpattern.match(elements[0]) and listvif == False:
+ continue
+ retlist[curbr].append(elements[0])
+
return retlist
def list_permanent(self):
- # Returns a list of permanent bridges (bridges configured to be enabled
- # at boot-time.
+ """
+ List bridges which are configured to be enabled at boot time (in other words, for which an ifcfg-file exists)
+
+ Returns a list of permanent bridges (bridges configured to be enabled at boot-time):
+ key: bridgename, value: [ interface1, interface2, ..., interfaceN ]
+ """
+
retlist = {}
ifpattern = re.compile('ifcfg-([a-z0-9]+)')
# RHEL treats this value as case-sensitive, so so will we.
@@ -146,13 +163,21 @@
# is orphaned.
retlist[brname].append(ifname)
return retlist
-
+
def add_bridge(self, brname):
- # Creates a bridge
- if brname not in self.ignorebridges:
+ """
+ Creates a bridge
+
+ Keyword arguments:
+ brname -- Name for this bridge (string, ex: "br0")
+ """
+
+ if brname not in self.options.ignorebridges:
brlist = self.list()
if brname not in brlist:
- exitcode = os.spawnv(os.P_WAIT, self.brctl, [ self.brctl, "addbr", brname ] )
+ exitcode = os.spawnv(os.P_WAIT, self.options.brctl, [ self.options.brctl, "addbr", brname ] )
+ if exitcode == 0:
+ os.spawnv(os.P_WAIT, self.options.brctl, [ self.options.brctl, "setfd", brname, "0" ] )
else:
# Bridge already exists, return 0 anyway.
exitcode = 0
@@ -162,12 +187,20 @@
return exitcode
def add_bridge_permanent(self, brname, ipaddr=None, netmask=None, gateway=None):
- # Creates a permanent bridge (writes to
- # /etc/sysconfig/network-scripts)
- if brname not in self.ignorebridges:
+ """
+ Creates a permanent bridge (this creates an ifcfg-file)
+
+ Keyword arguments:
+ brname -- Name for this bridge (string, ex: "br0")
+ ipaddr -- IP address for this bridge (string)
+ netmask -- Netmask for this bridge (string)
+ gateway -- Gateway address for this bridge (string)
+ """
+
+ if brname not in self.options.ignorebridges:
filename = "/etc/sysconfig/network-scripts/ifcfg-%s" % brname
fp = open(filename, "w")
- filelines = [ "DEVICE=%s\n" % brname, "TYPE=Bridge\n", "ONBOOT=yes\n" ]
+ filelines = [ "DEVICE=%s\n" % brname, "TYPE=Bridge\n", "ONBOOT=yes\n", "DELAY=0\n" ]
if ipaddr != None:
filelines.append("IPADDR=%s\n" % ipaddr)
if netmask != None:
@@ -176,18 +209,25 @@
filelines.append("GATEWAY=%s\n" % gateway)
fp.writelines(filelines)
fp.close()
- exitcode = os.spawnv(os.P_WAIT, self.ifup, [ self.ifup, brname ] )
+ exitcode = os.spawnv(os.P_WAIT, self.options.ifup, [ self.options.ifup, brname ] )
else:
exitcode = -1
return exitcode
def add_interface(self, brname, ifname):
- # Adds an interface to a bridge
- if brname not in self.ignorebridges:
+ """
+ Adds an interface to a bridge
+
+ Keyword arguments:
+ brname -- Bridge name (string, ex: "br0")
+ ifname -- Interface to add to bridge (string, ex: "eth3")
+ """
+
+ if brname not in self.options.ignorebridges:
brlist = self.list()
if ifname not in brlist[brname]:
- exitcode = os.spawnv(os.P_WAIT, self.brctl, [ self.brctl, "addif", brname, ifname ] )
+ exitcode = os.spawnv(os.P_WAIT, self.options.brctl, [ self.options.brctl, "addif", brname, ifname ] )
else:
# Interface is already a member of this bridge, return 0
# anyway.
@@ -198,8 +238,15 @@
return exitcode
def add_interface_permanent(self, brname, ifname):
- # Permanently adds an interface to a bridge.
- # Both interface and bridge must have a ifcfg-file we can write to.
+ """
+ Permanently adds an interface to a bridge.
+ Both interface and bridge must have a ifcfg-file we can write to.
+
+ Keyword arguments:
+ brname -- Bridge name (string, ex: "br0")
+ ifname -- Interface name (string, ex: "eth2")
+ """
+
brfilename = "/etc/sysconfig/network-scripts/ifcfg-%s" % brname
iffilename = "/etc/sysconfig/network-scripts/ifcfg-%s" % ifname
if os.path.exists(brfilename) and os.path.exists(iffilename):
@@ -232,41 +279,63 @@
return exitcode
def delete_bridge(self, brname):
- # Deletes a bridge
- if brname not in self.ignorebridges:
+ """
+ Deletes a bridge
+
+ Keyword arguments:
+ brname -- Bridge name (string, ex: "br0")
+ """
+ if brname not in self.options.ignorebridges:
# This needs some more error checking. :)
self.down_bridge(brname)
- exitcode = os.spawnv(os.P_WAIT, self.brctl, [ self.brctl, "delbr", brname ] )
+ exitcode = os.spawnv(os.P_WAIT, self.options.brctl, [ self.options.brctl, "delbr", brname ] )
else:
exitcode = -1
return exitcode
def delete_bridge_permanent(self, brname):
- # Deletes a bridge permanently
+ """
+ Permanently deletes a bridge. This bridge must be configured through an ifcfg-file.
+
+ Keyword arguments:
+ brname -- Bridge name (ex: br0)
+ """
filename = "/etc/sysconfig/network-scripts/ifcfg-%s" % brname
- if brname not in self.ignorebridges:
+ if brname not in self.options.ignorebridges:
returncode = self.delete_bridge(brname)
if os.path.exists(filename):
os.remove(filename)
else:
returncode = -1
return returncode
-
+
def delete_interface(self, brname, ifname):
- # Deletes an interface from a bridge
- if brname not in self.ignorebridges:
- exitcode = os.spawnv(os.P_WAIT, self.brctl, [ self.brctl, "delif", brname, ifname ] )
+ """
+ Deletes an interface from a bridge
+
+ Keyword arguments:
+ brname -- Bridge name (ex: br0)
+ ifname -- Interface to remove (ex: eth2)
+ """
+ if brname not in self.options.ignorebridges:
+ exitcode = os.spawnv(os.P_WAIT, self.options.brctl, [ self.options.brctl, "delif", brname, ifname ] )
else:
exitcode = -1
return exitcode
def delete_interface_permanent(self, brname, ifname):
- # Permanently deletes interface from bridge
+ """
+ Permanently deletes interface from bridge (interface must have an ifcfg-file)
+
+ Keyword arguments:
+ brname -- Bridge name (ex: br0)
+ ifname -- Interface to remove (ex: eth2)
+ """
iffilename = "/etc/sysconfig/network-scripts/ifcfg-%s" % ifname
- if brname in self.ignorebridges:
+ if brname in self.options.ignorebridges:
exitcode = -1
elif os.path.exists(iffilename):
# This only works if the interface itself is permanent
@@ -294,8 +363,14 @@
return exitcode
def delete_all_interfaces(self, brname):
- # Deletes all interfaces from a bridge
- if brname not in self.ignorebridges:
+ """
+ Deletes all interfaces from a bridge
+
+ Keyword arguments:
+ brname -- Bridge name (ex: "br0")
+ """
+
+ if brname not in self.options.ignorebridges:
bridgelist = self.list()
if brname in bridgelist:
# Does this bridge exist?
@@ -312,8 +387,13 @@
return exitcode
def delete_all_interfaces_permanent(self, brname):
- # Permanently deletes all interfaces from a bridge
- if brname not in self.ignorebridges:
+ """
+ Permanently deletes all interfaces from a bridge
+
+ Keyword arguments:
+ brname -- Bridge name (string, ex: "br0")
+ """
+ if brname not in self.options.ignorebridges:
bridgelist = self.list_permanent()
if brname in bridgelist:
exitcode = 0
@@ -333,7 +413,12 @@
return exitcode
def make_it_so(self, newconfig):
- # Applies supplied configuration to system
+ """
+ Applies supplied configuration to system
+
+ Keyword arguments:
+ newconfig -- Configuration (dictionary, ex: {"br0": ["eth1", "eth2"]})
+ """
# The false argument is to make sure we don't get the VIFs in the
# listing.
@@ -367,7 +452,9 @@
return self.list()
def write(self):
- # Applies running configuration to startup configuration
+ """
+ Applies running configuration to startup configuration
+ """
# The false argument is to make sure we don't get the VIFs in the
# listing.
@@ -402,24 +489,36 @@
return self.list_permanent()
def add_promisc_bridge(self, brname, ifname):
- # Creates a new bridge brname, attaches interface ifname to it and sets
- # the MAC address of the connected interface to FE:FF:FF:FF:FF:FF so
- # traffic can flow freely through the bridge. This is required for use
- # with Xen.
+ """
+ Creates a new bridge, attaches an interface to it and sets
+ the MAC address of the connected interface to FE:FF:FF:FF:FF:FF so
+ traffic can flow freely through the bridge. This seems to be required
+ for use with xen.
+
+ Keyword arguments:
+ brname -- Bridge name (string, ex: "br0")
+ ifname -- Interface name (string, ex: "eth2")
+ """
+
addbrret = self.add_bridge(brname)
addifret = self.add_interface(brname,ifname)
# Set the MAC address of the interface we're adding to the bridge to
# FE:FF:FF:FF:FF:FF. This is consistent with the behaviour of the
# Xen network-bridge script.
- setaddrret = os.spawnv(os.P_WAIT, self.ip, [ self.ip, "link", "set", ifname, "address", "fe:ff:ff:ff:ff:ff" ])
+ setaddrret = os.spawnv(os.P_WAIT, self.options.ip, [ self.options.ip, "link", "set", ifname, "address", "fe:ff:ff:ff:ff:ff" ])
if addbrret or addifret or setaddrret:
return -1
else:
return 0
def updown_bridge(self, brname, up):
- # Marks a bridge and all it's connected interfaces up or down (used
- # internally)
+ """
+ Marks a bridge and all its connected interfaces up or down (used internally)
+
+ Keyword arguments:
+ brname -- Bridge name (string, ex: "br0")
+ up -- Whether to mark this bridge up. (Boolean, ex: false, when false, it marks everything as down)
+ """
if up:
updown = "up"
@@ -438,17 +537,22 @@
exitcode = 0
for ifname in interfaces:
- retcode = os.spawnv(os.P_WAIT, self.ip, [self.ip, "link", "set", ifname, updown ] )
+ retcode = os.spawnv(os.P_WAIT, self.options.ip, [self.options.ip, "link", "set", ifname, updown ] )
if retcode != 0:
exitcode = retcode
return exitcode
def up_bridge(self, brname):
- # Marks a bridge and all it's connected interfaces up
+ """
+ Marks a bridge and all its connected interfaces up
+ """
+
return self.updown_bridge(brname, 1)
def down_bridge(self, brname):
- # Marks a bridge and all it's connected interfaces down
- return self.updown_bridge(brname, 0)
+ """
+ Marks a bridge and all its connected interfaces down
+ """
+ return self.updown_bridge(brname, 0)
|
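With the options now coming from certmaster's BaseConfig, paths such as brctl can be overridden in the module's configuration instead of being hard-coded. From the overlord side the module is driven like any other func module; a hedged usage sketch (it assumes func is installed, the usual overlord client API, a reachable minion named "minion1", and made-up bridge and interface names):

    import func.overlord.client as fc

    client = fc.Client("minion1")
    # results come back as a dict keyed by minion name
    print client.bridge.list()
    client.bridge.add_bridge("br1")
    client.bridge.add_interface("br1", "eth2")
    client.bridge.up_bridge("br1")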
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/certmastermod.py
^
|
@@ -13,6 +13,8 @@
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
+import os
+
# our modules
import func_module
from certmaster import certmaster as certmaster
@@ -31,7 +33,7 @@
"""
cm = certmaster.CertMaster()
return cm.get_csrs_waiting()
-
+
def get_signed_certs(self):
"""
Returns a list of all signed certs on this minion
@@ -46,7 +48,7 @@
list_of_hosts = self.__listify(list_of_hosts)
cm = certmaster.CertMaster()
for x in list_of_hosts:
- cm.sign_this_csr(x)
+ cm.sign_this_csr(x)
return True
def cleanup_hosts(self, list_of_hosts):
@@ -56,9 +58,61 @@
list_of_hosts = self.__listify(list_of_hosts)
cm = certmaster.CertMaster()
for x in list_of_hosts:
- cm.remove_this_cert(x)
+ cm.remove_this_cert(x)
return True
+ def peering_enabled(self):
+ """
+ Return config value for "peering"
+ """
+ return certmaster.CertMaster().cfg.peering
+
+ def known_peers(self):
+ """
+ Return a list of (host, sha) tuples for each known peer
+
+ Re-uses copyfile module for checksum.
+ """
+ import func.minion.modules.copyfile as copyfile
+ cm = certmaster.CertMaster()
+ files = cm.get_peer_certs()
+ cf = copyfile.CopyFile()
+
+ results = []
+ for f in files:
+ hostname = os.path.basename(f)
+ hostname = hostname.replace('.' + cm.cfg.cert_extension, "")
+ digest = cf.checksum(f)
+ results.append((hostname, digest))
+
+ return results
+
+ def remove_peer_certs(self, peers):
+ """
+ Remove the peer certificates for each host in 'peers'
+ """
+ cm = certmaster.CertMaster()
+ for p in peers:
+ certname = "%s.%s" % (p, cm.cfg.cert_extension)
+ certname = os.path.join(cm.cfg.peerroot, certname)
+ try:
+ os.unlink(certname)
+ except OSError:
+ # cert doesn't exist
+ pass
+ return True
+
+ def copy_peer_cert(self, peer, certblob):
+ """
+ Install certblob as the certificate for peer
+ """
+ import func.minion.modules.copyfile as copyfile
+ cm = certmaster.CertMaster()
+ certname = '%s.%s' % (peer, cm.cfg.cert_extension)
+ path = os.path.join(cm.cfg.peerroot, certname)
+ cf = copyfile.CopyFile()
+ return cf.copyfile(path, certblob)
+
def __listify(self, list_of_hosts):
if type(list_of_hosts) is type([]):
return list_of_hosts
@@ -67,7 +121,7 @@
def register_method_args(self):
"""
- Export certmaster module
+ Export certmaster module
"""
list_of_hosts = {
@@ -96,5 +150,26 @@
'list_of_hosts':list_of_hosts
},
'description':"Clean the certs for specified hosts"
+ },
+ 'peering_enabled':{
+ 'args':{},
+ 'description':"Whether or not peering is enabled"
+ },
+ 'known_peers':{
+ 'args':{},
+ 'description':"What peers are known"
+ },
+ 'remove_peer_certs':{
+ 'args':{
+ 'peers':'List of peers to remove',
+ },
+ 'description':'Remove peer certificate for one or more peers'
+ },
+ 'copy_peer_cert':{
+ 'args':{
+ 'peer':'Name of the peer',
+ 'certblob':'Certificate data',
+ },
+ 'description':'Copy certblob for peer'
}
}
|
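The new peering helpers return plain data structures (known_peers() gives (hostname, checksum) tuples), so they compose naturally with the overlord API. A hedged sketch (it assumes func is installed, the usual overlord client API, and a reachable minion acting as certmaster; hostnames are made up):

    import func.overlord.client as fc

    cm = fc.Client("certmaster1.example.org")
    print cm.certmastermod.get_csrs_waiting()
    cm.certmastermod.sign_hosts(["minion2.example.org"])
    # each entry is a (hostname, checksum) tuple
    print cm.certmastermod.known_peers()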
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/command.py
^
|
@@ -9,6 +9,7 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
"""
Abitrary command execution module for func.
"""
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/confmgt_augeas.py
^
|
@@ -0,0 +1,391 @@
+#!/usr/bin/env python
+
+#
+# Copyright 2008
+# Louis Coilliot <louis.coilliot@wazemmes.org>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+import func_module
+from os import path as ospath,getenv as osgetenv
+from time import strftime
+
+def lstripstr(the_string,the_prefix):
+ """Return a copy of the string with leading prefix removed."""
+ if the_string.startswith(the_prefix):
+ return the_string[len(the_prefix):]
+ return the_string
+
+def recurmatch(aug, path):
+ """Generate all tree children of a start path."""
+ #Function adapted from test_augeas.py in python-augeas-0.3.0
+ #Original Author: Harald Hoyer <harald@redhat.com>"""
+ if path:
+ if path != "/":
+ val = aug.get(path)
+ if val:
+ yield (path, val)
+ # here is the modification to (almost) match augtool print behavior:
+ else:
+ yield (path, '(none)')
+ # end of modification
+
+ m = []
+ if path != "/":
+ aug.match(path)
+ for i in m:
+ for x in recurmatch(aug, i):
+ yield x
+ else:
+ for i in aug.match(path + "/*"):
+ for x in recurmatch(aug, i):
+ yield x
+
+
+class confMgtAug(func_module.FuncModule):
+ version = "0.0.1"
+ api_version = "0.0.1"
+ description = "Manage parameters in configuration files, with the help of Augeas."
+
+
+ def get(self,entryPath,param='',hierarchy='/files'):
+ """Get a value for a config. parameter in a config. file,
+with the help of Augeas, a configuration API (cf http://augeas.net)"""
+ try:
+ from augeas import Augeas
+ aug=Augeas()
+ except Exception, e: return str(e)
+ # yes, entryPath.rstrip('/')+'/' is really needed (i.e. entryPath=/)
+ path=(hierarchy+entryPath.rstrip('/')+'/'+param).rstrip('/')
+ try:
+ matchtest=aug.match(path)
+ except Exception, e: return str(e)
+ if matchtest:
+ try:
+ pvalue=aug.get(path)
+ #aug.close()
+ except Exception, e: return str(e)
+ else:
+ # The node doesn't exist
+ pvalue='(o)'
+
+ if not pvalue:
+ # The node doesn't have a value
+ pvalue='(none)'
+
+ return { 'path': entryPath, 'parameter': param, 'value': pvalue, 'hierarchy': hierarchy }
+
+
+ def set(self,entryPath,param='',pvalue='',hierarchy='/files'):
+ """Set/change a value for a config. parameter in a config. file,
+with the help of Augeas, a configuration API (cf http://augeas.net)"""
+ try:
+ from augeas import Augeas
+ aug=Augeas()
+ except Exception, e: return str(e)
+
+ path=(hierarchy+entryPath.rstrip('/')+'/'+param).rstrip('/')
+
+ try:
+ aug.set(path,pvalue)
+ except Exception, e: return str(e)
+ # Here is a little workaround for a bug in save for augeas.
+ # In the future this won't be necessary anymore.
+ try:
+ aug.save()
+ except:
+ pass
+ # End of workaround
+ try:
+ aug.save()
+ except Exception, e: return str(e)
+
+ try:
+ pvalue=aug.get(path)
+ #aug.close()
+ except Exception, e: return str(e)
+
+ return { 'path': entryPath, 'parameter': param, 'value': pvalue, 'hierarchy': hierarchy }
+
+
+ def match(self,entryPath,param='',pvalue='',hierarchy='/files'):
+ """Match a value for a config. parameter in a config. file,
+with the help of Augeas, a configuration API (cf http://augeas.net)"""
+ try:
+ from augeas import Augeas
+ aug=Augeas()
+ except Exception, e: return str(e)
+
+ path=(hierarchy+entryPath.rstrip('/')+'/'+param).rstrip('/')
+ childpath=(hierarchy+entryPath.rstrip('/')+'/*/'+param).rstrip('/')
+
+ if pvalue:
+ try:
+ matchlist = [ ospath.dirname(lstripstr(item,'/files')) for item in aug.match(path) + aug.match(childpath) if ( aug.get(item) == pvalue ) ]
+ #aug.close()
+ except Exception, e: return str(e)
+ else:
+ try:
+ matchlist = [ ospath.dirname(lstripstr(item,'/files')) for item in aug.match(path) + aug.match(childpath) ]
+ #aug.close()
+ except Exception, e: return str(e)
+ return matchlist
+
+
+ def ls(self,entryPath,hierarchy='/files'):
+ """List the direct children of an entry in a config. file,
+with the help of Augeas, a configuration API (cf http://augeas.net)"""
+ try:
+ from augeas import Augeas
+ aug=Augeas()
+ except Exception, e: return str(e)
+ path=hierarchy+entryPath.rstrip('/')+'/*'
+ # We can't use a dict here because the same key can appear many times.
+ nodes=[]
+ try:
+ for match in aug.match(path):
+ pvalue=aug.get(match)
+ if not pvalue:
+ pvalue='(none)'
+ nodes.append([ospath.basename(match),pvalue])
+ except Exception, e: return str(e)
+
+ #try:
+ # aug.close()
+ #except Exception, e: return str(e)
+
+ return { 'path': entryPath, 'nodes': nodes, 'hierarchy': hierarchy }
+
+
+ # print is a reserved word so we use printconf instead
+ def printconf(self,entryPath,hierarchy='/files'):
+ """Print all tree children nodes from the path provided,
+with the help of Augeas, a configuration API (cf http://augeas.net)"""
+ path=hierarchy+entryPath
+ try:
+ from augeas import Augeas
+ aug=Augeas()
+ except Exception, e: return str(e)
+ matches = recurmatch(aug, path)
+ # Here we loose the benefit of the generator function:
+ return { 'path': entryPath, 'nodes':[ [lstripstr(p,'/files'),attr] for (p,attr) in matches ], 'hierarchy': hierarchy }
+
+
+
+ def rm(self,entryPath,param='',hierarchy='/files'):
+ """Delete a parameter (and all its children) in a config. file,
+with the help of Augeas, a configuration API (cf http://augeas.net)"""
+ try:
+ from augeas import Augeas
+ aug=Augeas()
+ except Exception, e: return str(e)
+
+ path=(hierarchy+entryPath.rstrip('/')+'/'+param).rstrip('/')
+
+ try:
+ result=aug.remove(path)
+ #aug.close()
+ except Exception, e: return str(e)
+ # Here is a little workaround for a bug in save for augeas.
+ # In the future this should not be necessary anymore.
+ try:
+ aug.save()
+ except:
+ pass
+ # End of workaround
+ try:
+ aug.save()
+ except Exception, e: return str(e)
+ if result == -1:
+ msg = 'Invalid node'
+ else:
+ msg = repr(result)+' node(s) removed.'
+ return msg
+
+ def getenv(self,varname):
+ """Get an environment variable."""
+ varvalue=osgetenv(varname)
+ if varvalue == None:
+ varvalue = '(none)'
+ return { varname : varvalue }
+
+ def backup(self,entryPath):
+ """Backup a file with a timestamp. Cautious before applying modifications on a configuration file."""
+ try:
+ import shutil
+ except Exception, e: return str(e)
+ backupPath=entryPath+'.'+strftime('%Y%m%d-%H%M')
+ try:
+ if not ospath.exists(backupPath):
+ shutil.copy(entryPath, backupPath)
+ msg='File '+entryPath+' backed up to '+ backupPath
+ else:
+ msg='Backup file '+backupPath+' already exists'
+ except (OSError, IOError), e: return str(e)
+ return msg
+
+
+ def register_method_args(self):
+ """
+ Implementing the method arg getter
+ """
+
+ return {
+ 'get':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file (fs or Augeas path)',
+ },
+ 'param':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'',
+ 'description':'The target parameter in the config. file'
+ },
+ 'hierarchy':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'/files',
+ 'description':'The augeas base path hierarchy'
+ }
+ },
+ 'description':"Get a value for a config. parameter in a config. file."
+ },
+ 'set':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file (fs or Augeas path)',
+ },
+ 'param':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'',
+ 'description':'The target parameter in the config. file'
+ },
+ 'pvalue':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'',
+ 'description':'The value to set for the parameter in the config. file'
+ },
+ 'hierarchy':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'/files',
+ 'description':'The augeas base path hierarchy'
+ }
+ },
+ 'description':"Set/change a value for a config. parameter in a config. file."
+ },
+ 'match':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file (fs or Augeas path)',
+ },
+ 'param':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'',
+ 'description':'The target parameter in the config. file'
+ },
+ 'pvalue':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'',
+ 'description':'The value to set for the parameter in the config. file'
+ },
+ 'hierarchy':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'/files',
+ 'description':'The augeas base path hierarchy'
+ }
+ },
+ 'description':"Match a value for a config. parameter in a config. file."
+ },
+ 'ls':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file (fs or Augeas path)',
+ },
+ 'hierarchy':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'/files',
+ 'description':'The augeas base path hierarchy'
+ }
+ },
+ 'description':"List the direct children of an entry in a config. file."
+ },
+ 'printconf':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file (fs or Augeas path)',
+ },
+ 'hierarchy':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'/files',
+ 'description':'The augeas base path hierarchy'
+ }
+ },
+ 'description':"Print all tree children nodes from the path provided."
+ },
+ 'rm':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file (fs or Augeas path)',
+ },
+ 'param':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'',
+ 'description':'The target parameter in the config. file'
+ },
+ 'hierarchy':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'/files',
+ 'description':'The augeas base path hierarchy'
+ }
+ },
+ 'description':"Delete a parameter (and all its children) in a config. file."
+ },
+ 'getenv':{
+ 'args':{
+ 'varname':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The name of the environment variable to get',
+ }
+ },
+ 'description':"Get an environment variable."
+ },
+ 'backup':{
+ 'args':{
+ 'entryPath':{
+ 'type':'string',
+ 'optional':False,
+ 'description':'The path to the config. file',
+ }
+ },
+ 'description':"Backup a file with a timestamp."
+ }
+ }
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/copyfile.py
^
|
@@ -8,8 +8,19 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+try:
+ import hashlib
+except ImportError:
+ # Python-2.4.z ... gah! (or even 2.3!)
+ import sha
+ class hashlib:
+ @staticmethod
+ def new(algo):
+ if algo == 'sha1':
+ return sha.new()
+ raise ValueError, "Bad checksum type"
+
-import sha
import os
import time
import shutil
@@ -24,14 +35,14 @@
description = "Allows for smart copying of a file."
def _checksum_blob(self, blob):
- thissum = sha.new()
+ thissum = hashlib.new('sha1')
thissum.update(blob)
return thissum.hexdigest()
-
+
def checksum(self, thing):
CHUNK=2**16
- thissum = sha.new()
+ thissum = hashlib.new('sha1')
if os.path.exists(thing):
fo = open(thing, 'r', CHUNK)
chunk = fo.read
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/cpu.py
^
|
@@ -0,0 +1,118 @@
+
+#
+# Copyright 2011
+# Tomas Edwardsson <tommi@tommi.org>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+import func_module
+from timemodule import sleep
+from func.minion.codes import FuncException
+
+SAMPLE_TIMER = 5
+MAX_SAMPLE_TIMER = 18
+
+class CpuModule(func_module.FuncModule):
+ version = "0.0.1"
+ api_version = "0.0.1"
+ description = "Gathering CPU related information"
+
+ def jiffies(self):
+
+ # Which fields we are parsing from /proc stat
+ fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest']
+
+ # Returning struct
+ res = {}
+
+ # Open the /proc/stat
+ try:
+ stat_fd = open("/proc/stat", "r")
+ except Exception, e:
+ raise FuncException("Unable to open /proc/stat: %s" % (e))
+
+ # Run through the contents of /proc/stat
+ for statline in stat_fd.readlines():
+ # We don't care about non CPU stuff
+ if statline[0:3] != 'cpu':
+ break
+ statline = (statline.split())
+
+ # split cpu name and its stats
+ cpu, stat = statline[0], statline[1:]
+
+ # Total jiffies for this cpu
+ total = 0
+
+ # Create the dictionary
+ res[cpu] = {}
+
+ # Run through stats, matching with named fields
+ for i in xrange(1, (len(stat))):
+ try:
+ res[cpu][fields[i]] = int(stat[i])
+ except IndexError:
+ break
+ total += int(stat[i])
+
+ # Record total jiffies
+ res[cpu]['total'] = total
+
+ return res
+
+ def usage(self, sampletime=SAMPLE_TIMER):
+ """
+ Returns percentage CPU utilization in an given
+ timeperiod.
+ """
+ if int(sampletime) > MAX_SAMPLE_TIMER:
+ raise FuncException("sampletime maximum is %s" % MAX_SAMPLE_TIMER)
+
+ # Get CPU statistics
+ prestat = self.jiffies()
+
+ # Wait for some activity
+ sleep(int(sampletime))
+
+ # Re fetch CPU statistics
+ poststat = self.jiffies()
+
+ # Dict to store results
+ results = {}
+
+ # Run through each CPU entry
+ for cpu in prestat.keys():
+ total = poststat[cpu]['total'] - prestat[cpu]['total']
+ results[cpu] = {}
+ for k in prestat[cpu].keys():
+ if k == 'total': continue
+ # Calculate the percentage
+ results[cpu][k] = float(poststat[cpu][k] - prestat[cpu][k]) / float(total) * 100
+ return results
+
+ def register_method_args(self):
+ """
+ Register the CPU method arguments
+ """
+ return{
+ 'usage':{
+ 'args':{
+ 'sampletime':{
+ 'type': 'int',
+ 'default': SAMPLE_TIMER,
+ 'optional':True,
+ 'description':'How long to sample CPU usage',
+ },
+ },
+ 'description':'Gather CPU data over period and return percent averages',
+ },
+ 'jiffies':{
+ 'args':{},
+ 'description':'Fetch the CPU jiffies from /proc/stat',
+ },
+ }
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/delegation.py
^
|
@@ -21,11 +21,11 @@
DELEGATED = True
class DelegationModule(func_module.FuncModule):
-
+
version = "0.0.1"
api_version = "0.0.1"
description = "Minion-side module to support delegation on sub-Overlords."
-
+
def run(self,module,method,args,delegation_list,async,nforks):
"""
Delegates commands down the path of delegation
@@ -33,11 +33,11 @@
"""
result_dict = {}
job_id_list = []
-
- #separate list passed to us into minions we can call directly and
+
+ #separate list passed to us into minions we can call directly and
#further delegation paths
(single_paths, grouped_paths) = dtools.group_paths(delegation_list)
-
+
#run delegated calls
for group in grouped_paths.keys():
overlord = fc.Overlord(group,
@@ -51,7 +51,7 @@
async,
nforks)
if async:
- job_id_list.append([overlord,
+ job_id_list.append([overlord,
delegation_results,
group,
True])
@@ -62,10 +62,10 @@
result_dict.update(delegation_results)
else:
result_dict.update(delegation_results[group])
-
+
#run direct calls
for minion in single_paths:
- overlord = fc.Overlord(minion,
+ overlord = fc.Overlord(minion,
async=async,
nforks=nforks)
overlord_module = getattr(overlord,module)
@@ -77,7 +77,7 @@
False])
else:
result_dict.update(results)
-
+
#poll async calls
while len(job_id_list) > 0:
for job in job_id_list:
@@ -104,5 +104,5 @@
result_dict.update(results)
job_id_list.remove(job)
time.sleep(0.1) #pause a bit so that we don't flood our minions
-
- return result_dict
+
+ return result_dict
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/disk.py
^
|
@@ -0,0 +1,61 @@
+#
+# Copyright 2009
+# Greg Swift <gregswift@gmail.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+import func_module
+from func.minion import sub_process
+
+class DiskModule(func_module.FuncModule):
+ version = "0.0.1"
+ api_version = "0.0.1"
+ description = "Gathering disk related information"
+
+ def usage(self, partition=None):
+ """
+ Returns the results of df -PT
+ """
+ results = {}
+ # splitting the command variable out into a list does not seem to function
+ # in the tests I have run
+ command = '/bin/df -PT'
+ if (partition):
+ command += ' %s' % (partition)
+ cmdref = sub_process.Popen(command, stdout=sub_process.PIPE,
+ stderr=sub_process.PIPE, shell=True,
+ close_fds=True)
+ (stdout, stderr) = cmdref.communicate()
+ for disk in stdout.split('\n'):
+ if (disk.startswith('Filesystem') or not disk):
+ continue
+ (device, fstype, total, used, available, percentage, mount) = disk.split()
+ results[mount] = {'device':device,
+ 'total':str(total),
+ 'used':str(used),
+ 'available':str(available),
+ 'fstype':str(fstype),
+ 'percentage':int(percentage[:-1])}
+ return results
+
+ def register_method_args(self):
+ """
+ The argument export method
+ """
+ return {
+ 'usage':{
+ 'args':{
+ 'partition': {
+ 'type':'string',
+ 'optional':True,
+ 'description':'A specific partition to get usage data for',
+ }
+ },
+ 'description':'Gather disk usage information'
+ }
+ }
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/echo.py
^
|
@@ -40,30 +40,43 @@
Run a list
"""
return command
-
-
+
+
def run_list_star(self,*command):
"""
Run a star list :)
"""
return command
-
+
def run_hash(self,command):
"""
Run hash
"""
return command
-
-
-
+
+
+
def run_boolean(self,command):
"""
Run boolean
"""
return command
+ def run_str_log(self,command):
+ import time
+
+ self.run_str_log.logger.info("Starting counting logger ...")
+ time.sleep(10)
+ for i in range(50):
+ time.sleep(1)
+ self.run_str_log.logger.info("Calling method with counter is %d"%i)
+ #log the progress so other apps can poll for it
+ self.run_str_log.logger.progress(i,50)
+
+ return command
+
def register_method_args(self):
"""
Implementing the argument getter
@@ -105,7 +118,7 @@
'type':'string',
'optional':False,
'options':['first_option','second_option','third_option']
- },
+ },
},
'description':'Getting the status of the service_name'
},
@@ -140,7 +153,7 @@
},
'description':'Returns back a hash'
},
-
+
'run_boolean':{
'args':
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/fact.py
^
|
@@ -0,0 +1,88 @@
+# our modules
+import func_module
+
+# =================================
+
+from func.minion.facts import minion_query
+class FactsModule(func_module.FuncModule):
+
+ version = "0.0.1"
+ api_version = "0.0.1"
+ description = "Has some useful about Facts module"
+
+ def list_fact_modules(self):
+ """
+ List facts that are availible in that system
+ """
+ return minion_query.load_facts_modules().keys()
+
+ def list_fact_methods(self,abort_on_conflict=False):
+ """
+ List facts that are availible in that system
+ """
+ methods = minion_query.load_fact_methods(abort_on_conflict)
+ if not methods.has_key('__conflict__'):
+ return methods.keys()
+ else:
+ return methods
+
+ def show_fact_module(self,module_name):
+ """
+ Show some info about fact module
+ """
+
+ for name,module in minion_query.load_facts_modules().iteritems():
+ if name == module_name:
+ return {
+ 'name':name,
+ 'description':getattr(module,"description",""),
+ 'version':getattr(module,"version","")
+ }
+ return {}
+
+ def show_fact_method(self,method_name):
+ """
+ Display info about fact method
+ """
+
+ for name,method in minion_query.load_fact_methods().iteritems():
+ if name == method_name:
+ return {
+ 'name':name,
+ 'tag':getattr(method,"tag",""),
+ }
+ return {}
+
+ def call_fact(self,method_name):
+ """
+ Sometimes we may need to get some of the facts live
+ """
+ for name,method in minion_query.load_fact_methods().iteritems():
+ if name == method_name:
+ return method()
+ return {}
+
+ def grep(self, word):
+ """
+ Get some info about facts
+ """
+ result = {
+ self.list_fact_modules:[],
+ self.list_fact_methods:[]
+ }
+
+ #search in modules
+ for m in self.list_fact_modules():
+ if m.lower().find(word)!=-1:
+ result[self.list_fact_modules].append(m)
+
+ #search in methods
+ for m in self.list_fact_methods():
+ if m.lower().find(word)!=-1:
+ val = self.call_fact(m)
+ result[self.list_fact_methods].append("%s: %s" % (m,val))
+
+
+ #the final collected stuff here
+ return result
+ grep = func_module.findout(grep)
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/filetracker.py
^
|
@@ -62,7 +62,7 @@
def __save(self, filehash):
"""
Write data structure to file.
- """
+ """
config = open(CONFIG_FILE, "w+")
for (path, scan_mode) in filehash.iteritems():
@@ -70,13 +70,15 @@
config.close()
#==========================================================
-
- def track(self, file_name_globs, full_scan=0):
+
+ def track(self, file_name_globs, full_scan=0, recursive=0, files_only=0):
"""
Adds files to keep track of.
file_names can be a single filename, a list of filenames, a filename glob
or a list of filename globs
full_scan implies tracking the full contents of the file, defaults to off
+ recursive implies tracking the contents of every subdirectory
+ files_only implies tracking files that are files (not directories)
"""
filehash = self.__load()
@@ -87,11 +89,22 @@
if type(file_name_globs) == type([]):
filenameglobs = file_name_globs
+ def _recursive(original_filenames):
+ for filename in original_filenames:
+ for (dir, subdirs, subfiles) in os.walk(filename):
+ for subdir in subdirs:
+ yield "%s/%s" % (dir, subdir)
+ for subfile in subfiles:
+ yield "%s/%s" % (dir, subfile)
# expand everything that might be a glob to a list
# of names to track
for filenameglob in filenameglobs:
filenames = glob.glob(filenameglob)
+ if recursive:
+ filenames += _recursive(filenames)
+ if files_only:
+ filenames = [f for f in filenames if os.path.isfile(f)]
for filename in filenames:
filehash[filename] = full_scan
self.__save(filehash)
@@ -125,7 +138,7 @@
for use by func-inventory. If you are writting another software application, using flatten=False will
prevent the need to parse the returns.
"""
-
+
# XMLRPC feeds us strings from the CLI when it shouldn't
flatten = int(flatten)
checksum_enabled = int(checksum_enabled)
@@ -165,7 +178,7 @@
# ------ what we return depends on flatten
if flatten:
- this_result = "%s: mode=%s mtime=%s uid=%s gid=%s md5sum=%s\n" % (file_name,mode,mtime,uid,gid,hash)
+ this_result = "%s: mode=%s mtime=%s uid=%s gid=%s md5sum=%s\n" % (file_name,mode,mtime,uid,gid,hash)
else:
this_result = [file_name,mode,mtime,uid,gid,hash]
@@ -178,11 +191,11 @@
else:
this_result.append(data)
tracked_file.close()
-
+
if os.path.isdir(file_name):
if not file_name.endswith("/"):
file_name = file_name + "/"
- files = glob.glob(file_name + "*")
+ files = glob.glob(file_name + "*")
if flatten:
this_result = this_result + "*** FILES ***\n" + "\n".join(files) + "\n*** END FILES ***\n\n"
else:
@@ -198,6 +211,25 @@
#==========================================================
+ def grep(self, word):
+ """
+ Some search utility about tracked files
+ """
+ results = {self.inventory:[]}
+ tracked_files = self.inventory()
+
+ if type(tracked_files) == str and tracked_files.lower().find(word)!=-1:
+ results[self.inventory].append(tracked_files)
+
+ else:
+ for res in tracked_files:
+ if res.lower().find(word)!=-1:
+ results[self.inventory].append(res)
+
+ return results
+ grep = func_module.findout(grep)
+
+
def __sumfile(self, fobj):
"""
Returns an md5 hash for an object with read() method.
@@ -247,6 +279,18 @@
'optional':True,
'default':0,
'description':"The 0 is for off and 1 is for on"
+ },
+ 'recursive':{
+ 'type':'int',
+ 'optional':True,
+ 'default':0,
+ 'description':"The 0 is for off and 1 is for on"
+ },
+ 'files_only':{
+ 'type':'int',
+ 'optional':True,
+ 'default':0,
+ 'description':"Track only files (not dirs or links)"
}
},
'description':"Adds files to keep track of"
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/func_getargs.py
^
|
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
#
# Copyright 2008
# Louis Coilliot <louis.coilliot@wazemmes.org>
@@ -16,21 +14,21 @@
import inspect
import func_module
-class getArgs(func_module.FuncModule):
- version = "0.0.1"
- api_version = "0.0.1"
+class getArgs(func_module.FuncModule):
+ version = "0.0.1"
+ api_version = "0.0.1"
description = "Get args of methods of the class in a func module"
- def get(self, modname, methodname):
+ def get(self, modname, methodname):
"""Returns a list of args for the specified method in the class of a func module.
This is useful when register_method_args is not defined (or not properly)
- """
+ """
vtuple=pv()
pyver=vtuple[0]+'.'+vtuple[1]
- sys.path.append('/usr/lib/python'+pyver+'/site-packages/func/minion/modules/')
+ sys.path.append('/usr/lib/python'+pyver+'/site-packages/func/minion/modules/')
the_mod=__import__(modname)
- name,data=inspect.getmembers(the_mod, inspect.isclass)[0]
+ name,data=inspect.getmembers(the_mod, inspect.isclass)[0]
the_class=modname+'.'+name
c=getattr(the_mod, name)
@@ -57,4 +55,3 @@
'description':'Returns a list with the args of the method you checked'
}
}
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/func_module.py
^
|
@@ -12,15 +12,66 @@
import inspect
import os
-
from func import logger
from certmaster.config import read_config, BaseConfig
from func.commonconfig import FuncdConfig
from func.utils import is_public_valid_method
from func.minion.func_arg import * #the arg getter stuff
+from types import FunctionType
+from func.logger import LogFactory
+from func import utils
+
+
+def log_all(fn):
+ """
+ That decorator will set a logger to a method
+ which will be associated with its job_id so
+ will log only during when it is running,cool:)
+ """
+ def wrapper(*args):
+
+ if utils.should_log(args):
+ logger = LogFactory.get_instance(app_name=args[len(args)-1]['job_id'])
+
+ #remove job_id from it
+ args = list(args)
+ args.pop()
+ else:#it seems it is not a async call so will use direct logger
+ logger = LogFactory.get_instance()
+
+ setattr(wrapper,"logger",logger)
+ return fn(*args)
+
+ #a hack for get_arg_methods
+ wrapper.overriden_args = inspect.getargspec(fn)
+ try:
+ wrapper.__name__ = fn.__name__
+ except:
+ wrapper._name_ = fn.__name__
+ return wrapper
+
+
+class DecorateLogMeta(type):
+ """
+ A metaclass which simply wrapps all of the public
+ methods in a minion module class,the main purpose
+ is without breaking api adding logging capabilites
+ to methods ...
+ """
+ def __new__(meta, classname, bases, classDict):
+ newClassDict = {}
+ for attributeName, attribute in classDict.items():
+ if type(attribute) == FunctionType and not attributeName.startswith("_"):
+ attribute = log_all(attribute)
+ newClassDict[attributeName] = attribute
+
+ return type.__new__(meta, classname, bases, newClassDict)
+
+
class FuncModule(object):
+ __metaclass__ = DecorateLogMeta
# the version is meant to
version = "0.0.0"
api_version = "0.0.0"
@@ -40,6 +91,7 @@
"module_api_version" : self.__module_api_version,
"module_description" : self.__module_description,
"list_methods" : self.__list_methods,
+ "grep" : self.grep,
"get_method_args" : self.__get_method_args,
}
self.__init_options()
@@ -58,6 +110,12 @@
self.options.write(fh)
return True
+ def config_items(self):
+ l = []
+ for i in self.options.iteritems():
+ l.append(i)
+ return l
+
def register_rpc(self, handlers, module_name):
# add the internal methods, note that this means they
# can get clobbbered by subclass versions
@@ -104,7 +162,7 @@
"""
tmp_arg_dict = self.register_method_args()
- #if it is not implemeted then return empty stuff
+ #if it is not implemeted then return empty stuff
if not tmp_arg_dict:
return {}
@@ -112,7 +170,7 @@
for method in tmp_arg_dict.iterkeys():
if not hasattr(self,method):
raise NonExistingMethodRegistered("%s is not in %s "%(method,self.__class__.__name__))
-
+
#create argument validation instance
self.arg_comp = ArgCompatibility(tmp_arg_dict)
#see if all registered arguments are there
@@ -121,7 +179,7 @@
#see if the options that were used are OK..
self.arg_comp.validate_all()
- return tmp_arg_dict
+ return tmp_arg_dict
def register_method_args(self):
"""
@@ -135,4 +193,31 @@
# to know they didnt implement it
return {}
-
+
+ def grep(self,word):
+ """
+ An useful utility for searching a specified
+ word in a bunch of methods the module specifies
+ @param word : Word to be searched in method calls
+ """
+ return {}
+
+
+def findout(fn):
+ """
+ A simple decorator to send some more structured info
+ to overlord instead every module does that in their
+ find modules ...
+ """
+ def _fn_arg(*args):
+ word = args[1].strip().lower()
+ find_result = fn(args[0],word)
+ structured_result = {}
+ if find_result:
+ for k,v in find_result.iteritems():
+ if find_result[k]:
+ structured_result["".join([k.im_class.__module__,".",k.__name__])] = v
+ return structured_result
+ return {}
+
+ return _fn_arg
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/getfile.py
^
|
@@ -0,0 +1,75 @@
+#!/usr/bin/python
+#Copyright (C) 2010 Louis-Frederic Coilliot
+#
+#This program is free software: you can redistribute it and/or modify
+#it under the terms of the GNU General Public License version 3 as
+# published by the Free Software Foundation.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+#along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Get a file, chunk by chunk. Minion side."""
+import sys
+import func_module
+try:
+ # py 2.4+
+ import hashlib
+except ImportError:
+ # py 2.3 support for RHEL4
+ import sha
+ class hashlib:
+ @staticmethod
+ def new(algo):
+ if algo == 'sha1':
+ return sha.new()
+ raise ValueError, "Bad checksum type"
+try:
+# py 2.4
+ from base64 import b64encode
+except ImportError:
+# py 2.3 for RHEL4
+ from base64 import encodestring as b64encode
+
+
+class GetFile(func_module.FuncModule):
+ """Get a file, chunk by chunk"""
+ def chunkslen(self, filename):
+ """Define the number of chunks of size=bufsize there is in the file"""
+ bufsize = 60000
+ try:
+ fic = open(filename, "r")
+ except IOError, err:
+ sys.stderr.write("Unable to open file: %s: %s\n" % (filename, err))
+ return(-1)
+ chunkslen = 0
+ while True:
+ fic.seek(bufsize*chunkslen)
+ data = fic.read(1024)
+ if not data:
+ break
+ chunkslen += 1
+ fic.close()
+ return(chunkslen)
+
+ def getchunk(self, chunknum, filename):
+ """Get a chunk of the file, after a seek to the right position"""
+ bufsize = 60000
+ try:
+ fic = open(filename, "r")
+ except IOError, err:
+ sys.stderr.write("Unable to open file: %s: %s\n" % (filename, err))
+ checksum = -1
+ return(checksum, '')
+ fic.seek(bufsize*chunknum)
+ chunk = b64encode(fic.read(bufsize))
+ mysha = hashlib.new('sha1')
+ mysha.update(chunk)
+ checksum = mysha.hexdigest()
+ fic.close()
+ return(checksum, chunk)
+
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/hardware.py
^
|
@@ -17,6 +17,9 @@
# other modules
import sys
+import traceback
+from gettext import gettext
+_ = gettext
# our modules
import sub_process
@@ -57,100 +60,139 @@
return results
def inventory(self):
- data = hw_info(with_devices=True)
+ data = self.info()
# remove bogomips because it keeps changing for laptops
# and makes inventory tracking noisy
if data.has_key("bogomips"):
del data["bogomips"]
return data
- def info(self,with_devices=True):
+ def grep(self,word):
"""
- Returns a struct of hardware information. By default, this pulls down
- all of the devices. If you don't care about them, set with_devices to
- False.
+ Find something in hardware info
"""
- return hw_info(with_devices)
+ result = {self.info:[]}
+ hw_info = self.info()
- def register_method_args(self):
- """
- Implementing the argument getter
- """
+ if hw_info == []:
+ return []
+
+ for hw_k,hw_v in hw_info.iteritems():
+ if hw_k.lower().find(word)!=-1:
+ result[self.info].append({hw_k:hw_v})
+ #we should see if the value is
+ elif type(hw_v)==str and hw_v.lower().find(word)!=-1:
+ result[self.info].append({hw_k:hw_v})
+ elif type(hw_v)==list:
+ #as it si known the hw_info has a devices
+ #in its final data and it is in format of:
+ #[{key:val}] so should check it also
+ for device in hw_v:
+ for d_k,d_v in device.iteritems():
+ if d_k.lower().find(word)!=-1:
+ result[self.info].append({d_k:d_v})
+ elif d_v.lower().find(word)!=-1:
+ result[self.info].append({d_k:d_v})
+
+ #get the final result
+ return result
+ grep = func_module.findout(grep)
- return{
- 'hal_info':{
- 'args':{},
- 'description':'Returns the output of lshal'},
- 'inventory':{
- 'args':{},
- 'description':"The inventory part"
- },
- 'info':{
- 'args':{
- 'with_devices':{
- 'type':'boolean',
- 'optional':True,
- 'default':True,
- 'description':'All devices'
- }
- },
- 'description':"A struct of hardware information"
- }
- }
-# =================================
-def hw_info(with_devices=True):
+ def info(self,with_devices=True):
+ """
+ Returns a struct of hardware information. By default, this pulls down
+ all of the devices. If you don't care about them, set with_devices to
+ False.
+ """
- # this may fail if smolt is not installed. That's ok. hal_info will
- # still work.
+ # this will fail if smolt is not installed. That's ok. hal_info will
+ # still work.
- # hack: smolt is not installed in site-packages
- sys.path.append("/usr/share/smolt/client")
- import smolt
-
- hardware = smolt.Hardware()
- host = hardware.host
-
- # NOTE: casting is needed because these are DBusStrings, not real strings
- data = {
- 'os' : str(host.os),
- 'defaultRunlevel' : str(host.defaultRunlevel),
- 'bogomips' : str(host.bogomips),
- 'cpuVendor' : str(host.cpuVendor),
- 'cpuModel' : str(host.cpuModel),
- 'numCpus' : str(host.numCpus),
- 'cpuSpeed' : str(host.cpuSpeed),
- 'systemMemory' : str(host.systemMemory),
- 'systemSwap' : str(host.systemSwap),
- 'kernelVersion' : str(host.kernelVersion),
- 'language' : str(host.language),
- 'platform' : str(host.platform),
- 'systemVendor' : str(host.systemVendor),
- 'systemModel' : str(host.systemModel),
- 'formfactor' : str(host.formfactor),
- 'selinux_enabled' : str(host.selinux_enabled),
- 'selinux_enforce' : str(host.selinux_enforce)
- }
+ # hack: smolt is not installed in site-packages
+ try:
+ sys.path.append("/usr/share/smolt/client")
+ import smolt
+ except ImportError, e:
+ errmsg = _("Import error while loading smolt module. Smolt is probably not installed. This method is useless without it.")
+ self.logger.warning(errmsg)
+ self.logger.warning("%s" % traceback.format_exc())
+ # hmm, what to return...
+ return []
+
+ hardware = smolt.Hardware()
+ host = hardware.host
+
+ # NOTE: casting is needed because these are DBusStrings, not real strings
+ data = {
+ 'os' : str(host.os),
+ 'defaultRunlevel' : str(host.defaultRunlevel),
+ 'bogomips' : str(host.bogomips),
+ 'cpuVendor' : str(host.cpuVendor),
+ 'cpuModel' : str(host.cpuModel),
+ 'numCpus' : str(host.numCpus),
+ 'cpuSpeed' : str(host.cpuSpeed),
+ 'systemMemory' : str(host.systemMemory),
+ 'systemSwap' : str(host.systemSwap),
+ 'kernelVersion' : str(host.kernelVersion),
+ 'language' : str(host.language),
+ 'platform' : str(host.platform),
+ 'systemVendor' : str(host.systemVendor),
+ 'systemModel' : str(host.systemModel),
+ 'formfactor' : str(host.formfactor),
+ 'selinux_enabled' : str(host.selinux_enabled),
+ 'selinux_enforce' : str(host.selinux_enforce)
+ }
+
+ # if no hardware info requested, just return the above bits
+ if not with_devices:
+ return data
+
+ collection = data["devices"] = []
+
+ for item in hardware.deviceIter():
+
+ (VendorID,DeviceID,SubsysVendorID,SubsysDeviceID,Bus,Driver,Type,Description) = item
+
+ collection.append({
+ "VendorID" : str(VendorID),
+ "DeviceID" : str(DeviceID),
+ "SubsysVendorID" : str(SubsysVendorID),
+ "Bus" : str(Bus),
+ "Driver" : str(Driver),
+ "Type" : str(Type),
+ "Description" : str(Description)
+ })
- # if no hardware info requested, just return the above bits
- if not with_devices:
return data
- collection = data["devices"] = []
-
- for item in hardware.deviceIter():
- (VendorID,DeviceID,SubsysVendorID,SubsysDeviceID,Bus,Driver,Type,Description) = item
- collection.append({
- "VendorID" : str(VendorID),
- "DeviceID" : str(DeviceID),
- "SubsysVendorID" : str(SubsysVendorID),
- "Bus" : str(Bus),
- "Driver" : str(Driver),
- "Type" : str(Type),
- "Description" : str(Description)
- })
+ def register_method_args(self):
+ """
+ Implementing the argument getter
+ """
+
+ return{
+ 'hal_info':{
+ 'args':{},
+ 'description':'Returns the output of lshal'},
+ 'inventory':{
+ 'args':{},
+ 'description':"The inventory part"
+ },
+ 'info':{
+ 'args':{
+ 'with_devices':{
+ 'type':'boolean',
+ 'optional':True,
+ 'default':True,
+ 'description':'All devices'
+ }
+ },
+ 'description':"A struct of hardware information"
+ }
+ }
- return data
+ # =================================
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/httpd.py
^
|
@@ -0,0 +1,100 @@
+#
+# Copyright 2009
+# John Eckersberg <jeckersb@redhat.com>
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+import func_module
+
+class ServerStatusUnavailable(Exception):
+ pass
+
+class MalformedServerStatus(Exception):
+ pass
+
+class Httpd(func_module.FuncModule):
+
+ # Update these if need be.
+ version = "0.0.1"
+ api_version = "0.0.1"
+ description = "Gather information from and manipulate Apache HTTPD"
+
+ import service
+ import urllib2
+
+ HTTPD_SERVICE_NAME = 'httpd'
+
+ def server_status(self, host="localhost", request="server-status", ssl=False):
+ """
+ Returns a dictionary representing output from mod_status.
+
+ :Parameters:
+ - `host`: the hostname to query against.
+ - `request`: the location of the mod_status handler.
+ - `ssl`: whether or not to use HTTPS.
+ """
+ if ssl:
+ proto = "https"
+ else:
+ proto = "http"
+
+ try:
+ status = self.urllib2.urlopen("%s://%s/%s?auto" % (proto, host, request)).read()
+ except Exception, e:
+ raise ServerStatusUnavailable, e
+
+ result = {}
+ for line in status.split('\n'):
+ if not line:
+ continue
+ try:
+ k,v = [foo.strip() for foo in line.split(':')]
+ except ValueError:
+ raise MalformedServerStatus
+ result[k] = v
+
+ return result
+
+ def graceful(self):
+ """
+ Issue a graceful restart to the httpd service.
+ """
+ return self.service.Service()._Service__command(Httpd.HTTPD_SERVICE_NAME, 'graceful')
+
+
+ def register_method_args(self):
+ return {
+ 'graceful':{
+ 'args':{},
+ 'description':"Issue a graceful restart to the httpd service."
+ },
+ 'server_status':{
+ 'args': {'host':{
+ 'type':'string',
+ 'optional':True,
+ 'default':"localhost",
+ 'description':'hostname of the http server to check status of'
+ },
+ 'request':{
+ 'type':'string',
+ 'optional':True,
+ 'default':'server-status',
+ 'description':'path to the url server status page'
+ },
+ 'ssl': {
+ 'type':'boolean',
+ 'optional':True,
+ 'default':False,
+ 'description':'True if the server is an ssl server'
+ }
+ },
+
+
+ 'description':'Check the httpd status on a server'
+ }
+ }
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/iptables/common.py
^
|
@@ -42,7 +42,7 @@
return False
except:
return False
-
+
def set_policy(chain, policy):
return call_iptables("-P %s %s" % (chain, policy) )
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/iptables/port.py
^
|
@@ -31,7 +31,7 @@
* Drop all incoming traffic to local TCP port 80:
> func '*' call iptables.port drop_from 80
* Drop all incomming traffic to local UDP port 53 from 192.168.0.0/24:
- > func '*' call iptables.port drop_from 80 192.168.0.0/24 udp
+ > func '*' call iptables.port drop_from 80 192.168.0.0/24 udp
"""
dir=parse_dir(dir)
clear_all("-D INPUT -p %s --%sport %s -s %s -j ACCEPT" % (prot, dir, port, ip) )
@@ -49,7 +49,7 @@
* Reject all incoming traffic to local TCP port 80:
> func '*' call iptables.port reject_from 80
* Reject incomming traffic to local UDP port 53 from 192.168.0.0/24:
- > func '*' call iptables.port reject_from 80 192.168.0.0/24 udp
+ > func '*' call iptables.port reject_from 80 192.168.0.0/24 udp
"""
dir=parse_dir(dir)
clear_all("-D INPUT -p %s --%sport %s -s %s -j ACCEPT" % (prot, dir, port, ip) )
@@ -67,7 +67,7 @@
* Accept all incoming traffic to local TCP port 80:
> func '*' call iptables.port accept_from 80
* Accept incomming traffic to local UDP port 53 from 192.168.0.0/24:
- > func '*' call iptables.port accept_from 80 192.168.0.0/24 udp
+ > func '*' call iptables.port accept_from 80 192.168.0.0/24 udp
"""
dir=parse_dir(dir)
clear_all("-D INPUT -p %s --%sport %s -s %s -j DROP" % (prot, dir, port, ip) )
@@ -171,7 +171,7 @@
'prot':prot,
'dir':dir,
'port':port
-
+
}
},
'accept_from':{'args':
@@ -180,7 +180,7 @@
'prot':prot,
'dir':dir,
'port':port
-
+
}
},
'drop_to':{'args':
@@ -189,7 +189,7 @@
'prot':prot,
'dir':dir,
'port':port
-
+
}
},
'reject_to':{'args':
@@ -198,7 +198,7 @@
'prot':prot,
'dir':dir,
'port':port
-
+
}
},
'accept_to':{'args':
@@ -207,10 +207,10 @@
'prot':prot,
'dir':dir,
'port':port
-
+
}
},
-
+
}
def parse_dir(dir):
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/jboss.py
^
|
@@ -1,5 +1,5 @@
#
-# Copyright 2008
+# Copyright 2008
# Luca Foppiano <lfoppiano@byte-code.com>
# Simone Pucci <spucci@byte-code.com>
# Byte-code srl www.byte-code.com
@@ -13,27 +13,31 @@
import func_module
-from func.minion import sub_process
-from func.minion import codes
+import sub_process
+import codes
+import copyfile
import process
import networktest
import command
+import ConfigParser
+from certmaster.config import BaseConfig
+from certmaster.config import Option
from func import logger
class JBoss(func_module.FuncModule):
- version = "0.0.1"
- api_version = "0.0.1"
+ version = "0.0.3"
+ api_version = "0.0.2"
description = "JBoss monitoring and control module"
def status(self):
"""
Get jboss information
(instance name, ports, bind address, pid)
- """
+ """
processo = process.ProcessModule()
- results = processo.info("ax")
-
- logging = logger.Logger().logger
+ results = processo.info("ax")
+
+ logging = logger.Logger().logger
output = []
for items in results:
if "-Dprogram.name=run.sh" in items:
@@ -45,21 +49,20 @@
if items.__contains__("-c"):
instance = items[items.index("-c")+1]
else:
- instance = ""
+ instance = None
if items.__contains__("-b"):
address = items[items.index("-b")+1]
else:
- address = ""
+ address = None
output.append((int(items[0]),instance,address,[]))
# Retrieve information about network (netstat -tupln)
+ net_status = networktest.NetworkTest()
+ results = net_status.netstat("-tupln")
- net_status = networktest.NetworkTest()
- results = net_status.netstat("-tupln")
-
- for string in results:#netstat_list:
+ for string in results:
address = None
port = None
pid = None
@@ -70,7 +73,7 @@
except:
address_port = None
pid_name = None
-
+
if address_port != None:
try:
address = address_port.split(":")[0]
@@ -84,7 +87,7 @@
pid = int(pid_name.split("/")[0])
except:
pid = None
-
+
if pid != None:
for data in output:
if data[0] == pid:
@@ -103,8 +106,8 @@
Return values:
- instance up but not listen = (-1, instances with problem)
- - OK = (0, [])
- """
+ - OK = (0, [])
+ """
if(status == None):
data = self.status()
else:
@@ -116,7 +119,7 @@
if len(item[3]) == 0:
code = -1
result.append(item)
-
+
return (code, result)
@@ -128,15 +131,15 @@
data = self.status()
else:
data = status
-
- port = int(port)
+
+ port = int(port)
founded = []
for item in data:
for ports in item[3]:
if port == ports:
founded.append(item)
-
+
return founded
@@ -154,7 +157,7 @@
for item in data:
if item[1] == instance:
founded.append(item)
-
+
return founded
def search_by_address(self, address, status=None):
@@ -171,134 +174,70 @@
for item in data:
if item[2] == address:
founded.append(item)
-
+
return founded
- def register_method_args(self):
- """
- Implementin the method argument getter part
- """
+ def static_configuration(self):
+ '''
+ Return configuration values, read from
+ configuration file.
+ '''
+ return (self.options.jboss_home, self.options.jboss_address, self.options.jboss_instance)
+
+ def start(self):
+ '''
+ Start a jboss instance
+ '''
+ logging = logger.Logger().logger
- return {
- 'status':{
- 'args':{},
- 'description':"Get jboss information"
- },
- 'check':{
- 'args':{
- 'status':{
- 'type':'string',
- 'optional':True,
- 'description':"The status of instance to check (optional)"
- }
- },
- 'description':"Check if jboss instances works"
- },
- 'search_by_port':{
- 'args':{
- 'port':{
- 'type':'int',
- 'optional':False,
- 'min':0,
- 'max':65535,
- 'description':'The port to search for'
- },
- 'status':{
- 'type':'string',
- 'optional':True,
- 'description':"The status of instance to check (optional)"
- }
- },
- 'description':"Search instance by listening port"
- },
- 'search_by_instance':{
- 'args':{
- 'instance':{
- 'type':'string',
- 'optional':False,
- 'description':"The name of the instance"
- },
- 'status':{
- 'type':'string',
- 'optional':True,
- 'description':"The status of the instance to search (optional)"
- }
- },
- 'description':"Search instance by instance name"
- },
- 'search_by_address':{
- 'args':{
- 'address':{
- 'type':'string',
- 'optional':False,
- 'description':"The bind adress to check"
- },
- 'status':{
- 'type':'string',
- 'optional':True,
- 'description':"The status of the instance to search (optional)"
- }
- },
- 'description':"Search instance by bind address"
+ address=self.options.jboss_address
+ instance=self.options.jboss_instance
- }
- }
+ jboss_run_path=self.options.jboss_home+"/bin/run.sh"
-'''
- def start(self, address="127.0.0.1", instance="default"):
- """
- Start a jboss instance, you must specify couple
- address/instance_name. ATM __call__() in server.py
- doesn't support keywords.
- """
- # TODO: move outside this two variables
- jboss_path="/var/lib/jboss-4.2.2.GA"
- jboss_run_path=jboss_path+"/bin/run.sh"
status=self.status()
-
+
if len(self.search_by_address(address=address, status=status)) != 0:
return (-1,"Another instances listening on this address, ")
- if len(self.search_by_instance(instance=instance, status=status)) != 0:
+ if len(self.search_by_instance(instance=instance, status=status)) != 0:
return (-1,"This instances is just instanced")
- launcher ="sh "+str(jboss_run_path)+" -c "+instance+" -b "+address
+ launcher ="sh "+str(jboss_run_path)+" -c "+instance+" -b "+address+" &"
+ logging.info(launcher)
- comm = command.Command()
- comm.run(launcher)
-
- return "OK, instance "+ instance +"started on address "+address
+ comm = command.Command()
+ comm.run(launcher)
+ return "OK, instance "+ instance +" started on address "+address
- def stop(self, address="127.0.0.1"):
- """
- Stop a jboss instance, It suppose you are using
- use standard JNDI port 1099.
- By default stop che localhost bind instance
- TODO: give more flexibility
- """
- jboss_path="/var/lib/jboss-4.2.2.GA"
- jboss_sd_path=jboss_path+"/bin/shutdown.sh"
+
+ def stop(self):
+ '''
+ Stop a jboss instance, It suppose you are using
+ use standard JNDI port 1099.
+ '''
+ logging = logger.Logger().logger
+
+ address=self.options.jboss_address
+ instance=self.options.jboss_instance
+
+ jboss_sd_path=self.options.jboss_home+"/bin/shutdown.sh"
data = self.search_by_address(address)
if len(data) == 0:
return (-1, "Istance on "+ address +" not running")
-
- launcher ="sh "+str(jboss_sd_path)+" -s jnp://"+address+":1099"
+
+ launcher ="sh "+str(jboss_sd_path)+" -s jnp://"+address+":1099 &"
+ logging.info(launcher)
comm = command.Command()
- comm.run(launcher)
+ comm.run(launcher)
return "OK, stopped instance listening address "+address
-
- def version(self):
- """
- Return jboss version
- TODO: implementation, is necessary to
- find a way to get jboss version (maybe
- by parse log files)
- """
- return "version"
-'''
+ class Config(BaseConfig):
+ jboss_home=Option('/var/lib/jboss')
+ jboss_instance=Option('default')
+ jboss_address=Option('127.0.0.1')
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/jobs.py
^
|
@@ -16,9 +16,70 @@
from func import jobthing
import func_module
-
+NUM_OF_LINES = 50
# =================================
+# copyright 2004 Michael D. Stenner <mstenner@ece.arizona.edu>
+# license: LGPL
+
+class xreverse:
+ def __init__(self, file_object, buf_size=1024*8):
+ self.fo = fo = file_object
+ fo.seek(0, 2) # go to the end of the file
+ self.pos = fo.tell() # where we are
+ self.buffer = '' # data buffer
+ self.lbuf = [] # buffer for parsed lines
+ self.done = 0 # we've read the last line
+ self.jump = -1 * buf_size
+
+ while 1:
+ try: fo.seek(self.jump, 1)
+ except IOError: fo.seek(0)
+ new_position = fo.tell()
+ new = fo.read(self.pos - new_position)
+ fo.seek(new_position)
+ self.pos = new_position
+
+ self.buffer = new + self.buffer
+ if '\n' in new: break
+ if self.pos == 0: return self.buffer
+
+ nl = self.buffer.split('\n')
+ nlb = [ i + '\n' for i in nl[1:-1] ]
+ if not self.buffer[-1] == '\n': nlb.append(nl[-1])
+ self.buffer = nl[0]
+ self.lbuf = nlb
+
+ def __iter__(self): return self
+
+ def next(self):
+ try:
+ return self.lbuf.pop()
+ except IndexError:
+ fo = self.fo
+ while 1:
+ #get the next chunk of data
+ try: fo.seek(self.jump, 1)
+ except IOError: fo.seek(0)
+ new_position = fo.tell()
+ new = fo.read(self.pos - new_position)
+ fo.seek(new_position)
+ self.pos = new_position
+
+ nl = (new + self.buffer).split('\n')
+ self.buffer = nl.pop(0)
+ self.lbuf = [ i + '\n' for i in nl ]
+
+ if self.lbuf: return self.lbuf.pop()
+ elif self.pos == 0:
+ if self.done:
+ raise StopIteration
+ else:
+ self.done = 1
+ return self.buffer + '\n'
+
+
+
class JobsModule(func_module.FuncModule):
version = "0.0.1"
@@ -33,3 +94,57 @@
"""
return jobthing.job_status(job_id)
+ def tail_output(self,minion_job_id):
+ """
+ A tail method which will tail the log files
+ that will track their output ....
+ """
+
+ from func.minion import sub_process
+ from certmaster.config import read_config
+ from func.commonconfig import FuncdConfig
+ from func.logger import config_file
+ import os
+ import subprocess
+
+
+
+ config = read_config(config_file, FuncdConfig)
+ method_log_dir = config.method_log_dir
+ method_log_file = os.path.join(method_log_dir,minion_job_id)
+ cmd= subprocess.Popen(
+ args=["tail","-n",str(NUM_OF_LINES),method_log_file],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell = False,
+ )
+
+ return cmd.communicate()
+
+
+ def get_progress(self,minion_job_id):
+ """
+ Get the log file and parse the progress part
+ to be polled on overlord
+ """
+ from certmaster.config import read_config
+ from func.commonconfig import FuncdConfig
+ from func.logger import config_file
+ import os
+ import re
+
+ config = read_config(config_file, FuncdConfig)
+ method_log_dir = config.method_log_dir
+ method_log_file = os.path.join(method_log_dir,minion_job_id)
+
+ reco=re.compile("Progress report (\d+)/(\d+) completed")
+ fo = file(method_log_file)
+ for line in xreverse(fo):
+ tmp = re.search(reco,line)
+ if tmp:
+ current = tmp.group(1)
+ all = tmp.group(2)
+ return (int(current),int(all))
+
+ #that tells that we couldnt found any report there
+ return(0,0)
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/mount.py
^
|
@@ -26,7 +26,7 @@
def list(self):
cmd = sub_process.Popen(["/bin/cat", "/proc/mounts"], executable="/bin/cat", stdout=sub_process.PIPE, shell=False, close_fds=True)
data = cmd.communicate()[0]
-
+
mounts = []
lines = [l for l in data.split("\n") if l] #why must you append blank crap?
@@ -43,7 +43,7 @@
def mount(self, device, dir, type="auto", options=None, createdir=False):
cmdline = ["/bin/mount", "-t", type]
- if options:
+ if options:
cmdline.append("-o")
cmdline.append(options)
cmdline.append(device)
@@ -58,7 +58,7 @@
return True
else:
return False
-
+
def umount(self, dir, killall=False, force=False, lazy=False):
# succeed if its not mounted
if not os.path.ismount(dir):
@@ -85,6 +85,23 @@
return self.list()
+ def grep(self,word):
+ """
+ Get some info about mounted devices
+ """
+ results = {self.list:[]}
+ list_res = self.list()
+
+ if list_res:
+ for list_dict in list_res:
+ for m_k,m_v in list_dict.iteritems():
+ if m_k.lower().find(word)!=-1 or m_v.lower().find(word)!=-1:
+ results[self.list].append({m_k:m_v})
+ return results
+ grep = func_module.findout(grep)
+
+
+
def register_method_args(self):
"""
Implementing the method arg getter
@@ -156,6 +173,6 @@
'description':"To flatten check."
}
},
- 'description':"Th einventory part of that module"
+ 'description':"The inventory part of that module"
}
}
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/nagios-check.py
^
|
@@ -28,7 +28,7 @@
"""
nagios_path='/usr/lib/nagios/plugins'
command = '%s/%s' % (nagios_path, check_command)
-
+
cmdref = sub_process.Popen(command.split(),stdout=sub_process.PIPE,stderr=sub_process.PIPE, shell=False, close_fds=True)
data = cmdref.communicate()
return (cmdref.returncode, data[0], data[1])
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/netapp/common.py
^
|
@@ -28,7 +28,7 @@
cmd = sub_process.Popen(cmdline,
executable=SSH,
stdin=sub_process.PIPE,
- stdout=sub_process.PIPE,
+ stdout=sub_process.PIPE,
stderr=sub_process.PIPE,
shell=False)
@@ -46,4 +46,3 @@
return True
else:
raise NetappCommandError, output
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/netapp/options.py
^
|
@@ -50,6 +50,3 @@
output = ssh(filer, cmd_opts)
# should return no output (maybe a space or newline)
return check_output("^\s*$", output)
-
-
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/netapp/snap.py
^
|
@@ -40,7 +40,7 @@
cmd_opts = ['snap', 'delete', vol, snap]
output = ssh(filer, cmd_opts)
return check_output(regex, output)
-
+
def list(self, filer, vol):
"""
TODO: Document me ...
@@ -61,7 +61,7 @@
'optional':False,
'description':"The name of the snapshot"
}
-
+
filer = {
'type':'string',
'optional':False,
@@ -93,4 +93,3 @@
'description':""
}
}
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/netapp/vol/__init__.py
^
|
@@ -31,7 +31,7 @@
cmd_opts = ['vol', 'create', vol, aggr, size]
output = ssh(filer, cmd_opts)
return check_output(regex, output)
-
+
def destroy(self, filer, vol):
"""
TODO: Document me ...
@@ -75,7 +75,7 @@
tokens = line.split()
if len(tokens) >= 2 and tokens[1] in ('online', 'offline', 'restricted'):
if current_vol: vols.append(current_vol)
- current_vol = {'name': tokens[0],
+ current_vol = {'name': tokens[0],
'state': tokens[1],
'status': [foo for foo in tokens[2:] if '=' not in foo],
'options': [foo for foo in tokens[2:] if '=' in foo]}
@@ -99,7 +99,7 @@
stat_regex = """vol size: Flexible volume .* has size .*."""
resize_regex = """vol size: Flexible volume .* size set to .*."""
cmd_opts = ['vol', 'size', vol]
-
+
if delta:
cmd_opts.append(delta)
output = ssh(filer, cmd_opts)
@@ -136,7 +136,7 @@
'optional':False,
'description':"The name of the volume"
}
-
+
filer = {
'type':'string',
'optional':False,
@@ -144,7 +144,7 @@
}
return {
-
+
'create':{
'args':{
'filer':filer,
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/netapp/vol/clone.py
^
|
@@ -31,7 +31,7 @@
cmd_opts = ['vol', 'clone', 'create', vol, '-b', parent, snap]
output = ssh(filer, cmd_opts)
return check_output(regex, output)
-
+
def split(self, filer, vol):
"""
TODO: Document me ...
@@ -42,7 +42,7 @@
output = ssh(filer, cmd_opts)
return check_output(regex, output)
-
+
def register_method_args(self):
"""
Implementing netapp.clone export
@@ -52,13 +52,13 @@
'optional':False,
'description':"The name of the volume"
}
-
+
filer = {
'type':'string',
'optional':False,
'description':"Resolvable name of the target filer"
}
-
+
snap = {
'type':'string',
'optional':False,
@@ -85,5 +85,5 @@
'vol':vol
},
'description':"Split the vol"
- }
+ }
}
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/networktest.py
^
|
@@ -29,12 +29,12 @@
self.__args_to_list(args))
def traceroute(self, *args):
- return self.__run_command('/bin/traceroute',
- self.__args_to_list(args))
+ return self.__run_command('/bin/traceroute',
+ self.__args_to_list(args))
def dig(self, *args):
- return self.__run_command('/usr/bin/dig',
- self.__args_to_list(args))
+ return self.__run_command('/usr/bin/dig',
+ self.__args_to_list(args))
def isportopen(self, host, port):
# FIXME: the return api here needs some work... -akl
@@ -54,6 +54,23 @@
sock.close()
return [0, "connection to %s:%s succeeded" % (host, port)]
+ def grep(self, word):
+ """
+ Grep some info from grep test
+ especially netstat is very suitable
+ for that purpose ...
+ """
+ results = {
+ self.netstat:[]
+ }
+ netstat_result = self.netstat()
+ for res in netstat_result:
+ if res.lower().find(word)!=-1:
+ results[self.netstat].append(res)
+
+ return results
+ grep = func_module.findout(grep)
+
def __args_to_list(self, args):
return [arg for arg in args]
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/overlord.py
^
|
@@ -12,6 +12,7 @@
import func.overlord.client as fc
from certmaster import certmaster as certmaster
from certmaster import utils as cm_utils
+from func import utils as func_utils
class OverlordModule(func_module.FuncModule):
@@ -33,9 +34,11 @@
current_minions.append(minion) #add it to the list
else:
cm = certmaster.CertMaster()
+ if cm == None: # this is minion only setup
+ return maphash
current_minions = cm.get_signed_certs()
for current_minion in current_minions:
- if current_minion in cm_utils.get_hostname():
+ if current_minion in func_utils.get_hostname_by_route():
maphash[current_minion] = {} #prevent infinite recursion
else:
next_hop = fc.Overlord(current_minion)
@@ -63,5 +66,3 @@
'description':"Builds a recursive map of the minions currently assigned to this minion overlord"
}
}
-
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/process.py
^
|
@@ -37,8 +37,8 @@
flags.replace(";", "") # prevent stupidity
- cmd = sub_process.Popen(["/bin/ps", flags], executable="/bin/ps",
- stdout=sub_process.PIPE,
+ cmd = sub_process.Popen(["/bin/ps", flags], executable="/bin/ps",
+ stdout=sub_process.PIPE,
stderr=sub_process.PIPE,
shell=False)
@@ -157,9 +157,9 @@
shareds[cmd]=shared
cmds[cmd]=cmds.setdefault(cmd,0)+private
if count.has_key(cmd):
- count[cmd] += 1
+ count[cmd] += 1
else:
- count[cmd] = 1
+ count[cmd] = 1
#Add shared mem for each program
total=0
@@ -181,9 +181,9 @@
def cmd_with_count(cmd, count):
if count>1:
- return "%s (%u)" % (cmd, count)
+ return "%s (%u)" % (cmd, count)
else:
- return cmd
+ return cmd
for cmd in sort_list:
results.append([
@@ -203,12 +203,12 @@
if pid == "0":
raise codes.FuncException("Killing pid group 0 not permitted")
if signal == "":
- # this is default /bin/kill behaviour,
+ # this is default /bin/kill behaviour,
# it claims, but enfore it anyway
signal = "-TERM"
if signal[0] != "-":
signal = "-%s" % signal
- rc = sub_process.call(["/bin/kill",signal, pid],
+ rc = sub_process.call(["/bin/kill",signal, pid],
executable="/bin/kill", shell=False,
close_fds=True)
print rc
@@ -216,11 +216,43 @@
def pkill(self,name,level=""):
# example killall("thunderbird","-9")
- rc = sub_process.call(["/usr/bin/pkill", name, level],
+ rc = sub_process.call(["/usr/bin/pkill", name, level],
executable="/usr/bin/pkill", shell=False,
close_fds=True)
return rc
+ def loadavg(self):
+ return open("/proc/loadavg", "r").readline().strip().split(" ")
+
+ def grep(self, word):
+ """
+        Grep the output of the info and mem methods
+        for a given word; useful when troubleshooting
+        a particular process.
+ """
+ results = {
+ self.info:[],
+ self.mem:[]
+ }
+ #info result comes here
+ info_result = self.info()
+ for process_pack in info_result:
+ tmp_str = " ".join(process_pack)
+ if tmp_str.lower().find(word)!= -1 :
+ results[self.info].append(tmp_str)
+
+ #mem results will come here
+ mem_result = self.mem()
+ for m in mem_result:
+ tmp_str = " ".join(m)
+ if tmp_str.lower().find(word)!= -1 :
+ results[self.mem].append(tmp_str)
+
+
+ return results
+ grep = func_module.findout(grep)
+
+
def register_method_args(self):
"""
Implementing the argument getter
@@ -270,5 +302,9 @@
}
},
"description":"Kill an app with supplying a name and level"
- }
+ },
+ 'loadavg':{
+ 'args':{},
+ 'description':"Returns a list of loadavg details."
+ },
}
|
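The added loadavg method just splits /proc/loadavg, so one call returns the load averages per minion. A short sketch, assuming a wildcard target and the usual func client return shape:

# Sketch only: "*" targets every signed minion; narrow the spec as needed.
import func.overlord.client as fc

overlord = fc.Overlord("*")
loads = overlord.process.loadavg()          # per-minion /proc/loadavg, already split
for minion, avg in loads.iteritems():
    print minion, avg[0:3]                  # 1, 5 and 15 minute load averages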
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/pullfile.py
^
|
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# pullfile.py
+#
+# Copyright 2009, Stone-IT
+# L.S. Keijser <keijser@stone-it.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301 USA
+
+
+from func.minion import codes
+import func_module, os
+from urllib2 import Request, urlopen, URLError
+
+class PullFile(func_module.FuncModule):
+
+ version = "0.0.2"
+ api_version = "0.0.1"
+ description = "Download remote file and save locally"
+
+ def update(self, args):
+ for inFile, outFile in args.iteritems():
+ try:
+ req = Request(inFile)
+ webFile = urlopen(req)
+ except URLError:
+ raise codes.FuncException("Error retrieving file")
+ try:
+ f = open(outFile, 'w')
+ except IOError:
+ raise codes.FuncException("Error opening local file")
+ f.write(webFile.read())
+ f.close()
+ webFile.close()
+ return 0
+
+ def get(self, inFile, outFile):
+ try:
+ req = Request(inFile)
+ webFile = urlopen(req)
+ except URLError:
+ raise codes.FuncException("Error retrieving file")
+ try:
+ f = open(outFile, 'w')
+ except IOError:
+ raise codes.FuncException("Error opening local file")
+ f.write(webFile.read())
+ f.close()
+ webFile.close()
+ return 0
|
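The new pullfile module downloads a URL on the minion itself rather than pushing bytes over the func channel. A hedged usage sketch; the URL, destination path and minion name are all placeholders:

import func.overlord.client as fc

overlord = fc.Overlord("webserver1.example.org")     # placeholder minion
# fetch one file onto the minion; on the minion, URL or IO errors surface as FuncException
overlord.pullfile.get("http://repo.example.org/motd.txt", "/etc/motd")
# or hand over a {url: destination} map and let the minion loop over it
overlord.pullfile.update({"http://repo.example.org/app.conf": "/etc/app.conf"})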
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/reboot.py
^
|
@@ -43,4 +43,3 @@
'description':"Rebooting the minions"
}
}
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/rpms.py
^
|
@@ -1,5 +1,7 @@
# Copyright 2007, Red Hat, Inc
# Michael DeHaan <mdehaan@redhat.com>
+# Copyright 2009
+# Milton Paiva Neto <milton.paiva@gmail.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
@@ -9,7 +11,6 @@
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import func_module
-import rpm
class RpmModule(func_module.FuncModule):
@@ -22,58 +23,81 @@
Returns information on all installed packages.
By default, 'flatten' is passed in as True, which makes printouts very
         clean in diffs for use by func-inventory.  If you are writing another
- software application, using flatten=False will prevent the need to
+ software application, using flatten=False will prevent the need to
parse the returns.
"""
- # I have not been able to get flatten=False to work if there
- # is more than 491 entries in the dict -- ashcrow
- ts = rpm.TransactionSet()
-# mi = ts.dbMatch()
-# results = []
-# for hdr in mi:
-# name = hdr['name']
-# epoch = (hdr['epoch'] or 0)
-# version = hdr['version']
-# release = hdr['release']
-# arch = hdr['arch']
-# if flatten:
-# results.append("%s %s %s %s %s" % (name, epoch, version,
-# release, arch))
-# else:
-# results.append([name, epoch, version, release, arch])
+ return self.glob('', flatten)
+
+ def grep(self, word):
+ """
+ Grep some info from packages we got from
+ inventory especially
+ """
+ results = {self.inventory:[]}
+ inventory_res = self.inventory()
+
+ for res in inventory_res:
+ if res.lower().find(word)!= -1:
+ results[self.inventory].append(res)
+ return results
+
+ grep = func_module.findout(grep)
+
+ def verify(self, pattern='', flatten=True):
+ """
+ Returns information on the verified package(s).
+ """
results = []
+ import yum
+ for rpm in self.glob(pattern, False):
+ name = rpm[0]
+ yb = yum.YumBase()
+ pkgs = yb.rpmdb.searchNevra(name)
+ for pkg in pkgs:
+ errors = pkg.verify()
+ for fn in errors.keys():
+ for prob in errors[fn]:
+ if flatten:
+ results.append('%s %s %s' % (name, fn, prob.message))
+ else:
+ results.append([name, fn, prob.message])
return results
def glob(self, pattern, flatten=True):
"""
Return a list of installed packages that match a pattern
"""
+ import rpm
ts = rpm.TransactionSet()
mi = ts.dbMatch()
results = []
if not mi:
- return
- mi.pattern('name', rpm.RPMMIRE_GLOB, pattern)
+ return results
+ if (pattern != ''):
+ mi.pattern('name', rpm.RPMMIRE_GLOB, pattern)
for hdr in mi:
name = hdr['name']
+ # not all packages have an epoch
epoch = (hdr['epoch'] or 0)
version = hdr['version']
release = hdr['release']
# gpg-pubkeys have no arch
arch = (hdr['arch'] or "")
-
if flatten:
+ # flatten forms a simple text list separated by spaces
results.append("%s %s %s %s %s" % (name, epoch, version,
- release, arch))
+ release, arch))
else:
+ # Otherwise we return it as a list
results.append([name, epoch, version, release, arch])
+ results.sort()
return results
+
def register_method_args(self):
"""
Implementing the method argument getter
"""
-
return {
'inventory':{
'args':{
@@ -86,6 +110,17 @@
},
'description':"Returns information on all installed packages"
},
+ 'verify':{
+ 'args':{
+ 'flatten':{
+ 'type':'boolean',
+ 'optional':True,
+ 'default':True,
+                    'description':"Print cleanly in diffs"
+ }
+ },
+ 'description':"Returns information on the verified package(s)"
+ },
'glob':{
'args':{
'pattern':{
|
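With these changes inventory() becomes a thin wrapper around glob(''), and verify() leans on yum's rpmdb bindings for an rpm -V style check. A sketch of how the new calls might look from an overlord; the target and package patterns are placeholders:

import func.overlord.client as fc

overlord = fc.Overlord("minion1.example.org")        # placeholder target
kernels = overlord.rpms.glob("kernel*")              # sorted "name epoch version release arch" strings
problems = overlord.rpms.verify("openssh*", False)   # [name, file, problem] entries, one per failed check
print kernels, problems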
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/service.py
^
|
@@ -25,12 +25,12 @@
description = "Allows for service control via func."
def __command(self, service_name, command):
-
- service_name = service_name.strip() # remove useless spaces
- filename = os.path.join("/etc/rc.d/init.d/",service_name)
+ service_name = service_name.strip() # remove useless spaces
+
+ filename = os.path.join("/etc/init.d/",service_name)
if os.path.exists(filename):
- return sub_process.call(["/sbin/service", service_name, command], close_fds=True)
+ return sub_process.call(["/sbin/service", service_name, command], close_fds=True, env={ 'LANG':'C' })
else:
raise codes.FuncException("Service not installed: %s" % service_name)
@@ -55,38 +55,78 @@
"enabled" : self.get_enabled()
}
+ def grep(self,word):
+ """
+        Grep the enabled and running service lists for a given word.
+ """
+ final_dict = {self.get_running:[],
+ self.get_enabled:[]
+ }
+ running = self.get_running()
+ enabled = self.get_enabled()
+
+ #get enabled ones
+ for e in enabled:
+ if e[0].lower().find(word)!=-1:
+ final_dict[self.get_enabled].append(e)
+
+ #get running ones
+ for e in running:
+ if e[0].lower().find(word)!=-1:
+ final_dict[self.get_running].append(e)
+
+ return final_dict
+ grep = func_module.findout(grep)
+
+
def get_enabled(self):
"""
Get the list of services that are enabled at the various runlevels. Xinetd services
only provide whether or not they are running, not specific runlevel info.
"""
- chkconfig = sub_process.Popen(["/sbin/chkconfig", "--list"], stdout=sub_process.PIPE, close_fds=True)
+ chkconfig = sub_process.Popen(["/sbin/chkconfig", "--list"], stdout=sub_process.PIPE, close_fds=True, env={ "LANG": "C" })
data = chkconfig.communicate()[0]
results = []
for line in data.split("\n"):
if line.find("0:") != -1:
- # regular services
- tokens = line.split()
- results.append((tokens[0],tokens[1:]))
+ # regular services
+ tokens = line.split()
+ results.append((tokens[0],tokens[1:]))
elif line.find(":") != -1 and not line.endswith(":"):
- # xinetd.d based services
- tokens = line.split()
- tokens[0] = tokens[0].replace(":","")
- results.append((tokens[0],tokens[1]))
+ # xinetd.d based services
+ tokens = line.split()
+ tokens[0] = tokens[0].replace(":","")
+ results.append((tokens[0],tokens[1]))
return results
def get_running(self):
"""
Get a list of which services are running, stopped, or disabled.
"""
- chkconfig = sub_process.Popen(["/sbin/service", "--status-all"], stdout=sub_process.PIPE, close_fds=True)
- data = chkconfig.communicate()[0]
results = []
- for line in data.split("\n"):
- if line.find(" is ") != -1:
- tokens = line.split()
- results.append((tokens[0], tokens[-1].replace("...","")))
+
+ # Get services
+ services = self.get_enabled()
+
+ init_return_codes = { 0: 'running', 1: 'dead', 2:'locked', 3:'stopped' }
+
+ for service in services:
+ filename = os.path.join("/etc/init.d/",service[0])
+ # Run status for service
+ try:
+ init_exec = sub_process.Popen([filename, "status"], stdout=sub_process.PIPE, close_fds=True, env={ "LANG": "C" })
+ except Exception, e:
+ raise codes.FuncException("Service status error %s on initscript %s" % (e, filename))
+
+ # Get status output
+ data = init_exec.communicate()[0]
+
+ # Wait for command to complete
+ init_exec.wait()
+
+ # Append the result, service name, return status and status output
+ results.append((service[0], init_return_codes[init_exec.returncode], data))
return results
def register_method_args(self):
@@ -106,7 +146,7 @@
'get_enabled':{'args':{}},
'inventory':{'args':{}},
'status':{'args':{
- 'service_name':service_name,
+ 'service_name':service_name,
},
'description':'Getting the status of the service_name'
},
|
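get_running() no longer scrapes "service --status-all"; it walks the chkconfig list and runs each initscript's status action, mapping the return code through {0: running, 1: dead, 2: locked, 3: stopped}. A sketch of consuming the new three-field tuples, with a placeholder minion:

import func.overlord.client as fc

overlord = fc.Overlord("minion1.example.org")        # placeholder minion
for minion, services in overlord.service.get_running().iteritems():
    for name, state, output in services:             # (service, state string, raw status output)
        if state != "running":
            print minion, name, state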
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/smart.py
^
|
@@ -46,6 +46,20 @@
return (cmd.returncode, results)
+ def grep(self, word):
+ """
+        Grep the output of the info method for a given word.
+ """
+ results = {self.info:[]}
+ info_res = self.info()[1]
+
+ if info_res:
+ for res in info_res:
+ if res.lower().find(word)!=-1:
+ results[self.info].append(res)
+ return results
+ grep = func_module.findout(grep)
+
def register_method_args(self):
"""
Implementing method argument getter
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/snmp.py
^
|
@@ -28,7 +28,7 @@
Runs an snmpget on a specific oid returns the output of the call.
"""
command = '%s -c %s %s %s' % (base_snmp_command, rocommunity, hostname, oid)
-
+
cmdref = sub_process.Popen(command.split(), stdout=sub_process.PIPE, stderr=sub_process.PIPE, shell=False, close_fds=True)
data = cmdref.communicate()
return (cmdref.returncode, data[0], data[1])
@@ -37,7 +37,7 @@
"""
Implementing the argument getter
"""
-
+
return {
'get':{
'args':{
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/sysctl.py
^
|
@@ -31,6 +31,21 @@
def set(self, name, value):
return self.__run("/sbin/sysctl -w %s=%s" % (name, value))
+ def grep(self, word):
+ """
+        Grep the sysctl settings list for a given word.
+ """
+ results = {self.list:[]}
+        sys_res = self.list()  # the list from sysctl
+
+ for res in sys_res:
+ if res.lower().find(word)!=-1:
+ results[self.list].append(res)
+
+ return results
+ grep = func_module.findout(grep)
+
+
def register_method_args(self):
"""
Implementing the method argument getter
@@ -63,7 +78,7 @@
'optional':False,
'description':"The name value to be set."
}
-
+
},
'description':"Use this option when you want to change a sysctl setting"
}
|
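The sysctl grep follows the same findout pattern as the other modules: filter the module's list() output (presumably the full sysctl listing) for a word. A placeholder sketch; the minion name and the key being set are invented:

import func.overlord.client as fc

overlord = fc.Overlord("minion1.example.org")        # placeholder minion
print overlord.sysctl.grep("ip_forward")             # matching sysctl lines, per minion
overlord.sysctl.set("net.ipv4.ip_forward", 1)        # existing set() method, shown in the hunk above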
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/test.py
^
|
@@ -15,7 +15,7 @@
bool_option = BoolOption(True)
float_option = FloatOption(3.14159)
testvalue = 'this is a test. It is only a test'
-
+
def add(self, numb1, numb2):
return numb1 + numb2
@@ -64,25 +64,26 @@
"""
Returns the options config
"""
- return self.options
+ return self.config_items()
def config_save(self):
- """
- Saves the options config
- """
- self.save_config()
- return self.options
+ """
+ Saves the options config
+ """
+ self.save_config()
+        # need to convert the options object into a marshallable data structure
+ return self.config_items()
def config_set(self, key_name, value):
setattr(self.options,key_name, value)
self.save_config()
- return self.options
+ return self.config_items()
def config_get(self, key_name):
- return getattr(self.options, key_name)
+ return getattr(self.options, key_name)
def config_get_test(self):
- return self.options.testvalue
+ return self.options.testvalue
def register_method_args(self):
"""
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/minion/modules/users.py
^
|
@@ -0,0 +1,1743 @@
+###############################################################################
+#
+# Func Users and Group management module.
+# Author: Gregory Masseau <gjmasseau@learn.senecac.on.ca>
+#
+###############################################################################
+#
+# Legal:
+#
+# This software may be freely redistributed under the terms of the GNU
+# general public license.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+###############################################################################
+#
+# Changelog:
+#
+# 0.5:
+# - (Feature) Implemented register_method_args.
+# - (External) Unit tests added to test_client.py.
+#
+# 0.4:
+# - (Feature) Password alteration method added.
+# - (Feature) Pluralized methods for all applicable singular methods.
+# - (Misc) General API cleanup.
+#
+# 0.3:
+# - (Feature) Methods added to create, delete, and modify groups.
+# - (Feature) Methods added to create, delete, and modify users.
+# - (Feature) Manage the memberships of users within groups
+#
+# 0.2:
+# - (Feature) Most informational methods are complete and working for both
+# users and groups at this point.
+#
+# 0.1:
+# - (Feature) Initial release, supporting only some informational query
+# messages regarding user accounts on the target system.
+#
+###############################################################################
+"""
+User management module for func.
+"""
+
+from func.minion.modules import func_module
+from func.minion import sub_process
+import pwd
+import grp
+from os import system
+
+class UsersModule(func_module.FuncModule):
+    version = "0.5"
+    api_version = "0.5"
+    description = "User and group management for func minions."
+
+# INTERNALLY USED METHODS #####################################################
+ def __command(self,*list):
+ """
+ This method is used internally by this module when invoking external commands. It should remain private.
+ This method should probably be improved to check the elems for suspicious characters like ';','&&','||'.
+ """
+ cmd = ''
+ for elem in list:
+ if elem == '':
+ pass
+ else:
+ if type(elem) == str:
+ ret = str(elem.replace("'", ""))
+ else:
+ ret = str(elem)
+ cmd = cmd + " '" + ret + "'"
+
+ if system(cmd+" 2>/dev/null 1>/dev/null"):
+ return False
+ else:
+ return True
+
+
+ def __plural(self,f):
+ return (lambda xs: map(f,xs))
+
+# GROUPADD METHODS ############################################################
+ def __groupadd(self,group,*switches):
+        """Constructs the proper argument sequence for self.__command and returns its result."""
+ if self.group_exists(group):
+ return False
+ else:
+ return self.__command("/usr/sbin/groupadd",group,*switches)
+
+ def group_add(self,group,*gid):
+ """Adds a group on the target system(s)."""
+ if gid:
+ if self.gid_exists(gid[0]):
+ return False
+ else:
+ print str(gid[0]) + "<-"
+ return self.__groupadd(group,"-g",gid[0])
+ else:
+ return self.__groupadd(group)
+
+ def groups_add(self,*groups):
+ """Adds a series of groups on the target system(s)."""
+ return self.__plural(self.group_add)(groups)
+
+ def group_add_non_unique(self,group,*gid):
+ """Adds a group on the target system(s)."""
+ if gid:
+ if self.gid_exists(gid[0]):
+ return False
+ else:
+ return self.__groupadd(group,"-o","-g",gid[0])
+ else:
+ return self.__groupadd(group,"-o")
+
+# GROUPDEL METHODS ############################################################
+ def __groupdel(self,group,*switches):
+        """Constructs the proper argument sequence for self.__command and returns its result."""
+ if self.group_exists(group):
+ return self.__command("/usr/sbin/groupdel",group,*switches)
+ else:
+ return False
+
+ def group_del(self,group):
+ """Deletes a group on the target system(s)."""
+ return self.__groupdel(group)
+
+ def groups_del(self,*groups):
+ """Adds a series of groups."""
+ return self.__plural(self.group_del)(groups)
+
+# GROUPMOD METHODS ############################################################
+ def __groupmod(self,group,*switches):
+        """Constructs the proper argument sequence for self.__command and returns its result."""
+ if self.group_exists(group):
+ if switches:
+ return self.__command("/usr/sbin/groupmod",group,*switches)
+ else:
+ return self.__command("/usr/sbin/groupmod",group)
+ else:
+ return False
+
+ def group_set_gid_non_unique(self,group,gid):
+ """Changes the GID of the specified group on the target system(s), allowing non-unique GID."""
+ return self.__groupmod(group,"-o","-g",gid)
+
+ def group_set_gid(self,group,gid):
+ """Changes the GID of the specified group on the target system(s)."""
+ if self.gid_exists(gid):
+ return False
+ else:
+ return self.__groupmod(group,"-g",gid)
+
+ def group_set_groupname(self,group,groupname):
+ """Changes the name of the specified group on the target system(s)."""
+ if self.group_exists(groupname):
+ return False
+ else:
+ return self.__groupmod(group,"-n",groupname)
+
+# USERADD METHODS #############################################################
+ def __useradd(self,user,*switches):
+        """Constructs the proper argument sequence for self.__command and returns its result."""
+ if self.user_exists(user):
+ return False
+ else:
+ return self.__command("/usr/sbin/useradd",user)
+
+ def user_add(self,user):
+ """Adds a user on the target system(s)."""
+ return self.__useradd(user)
+
+ def users_add(self,*users):
+ """Adds a series of users on the target system(s)."""
+ return self.__plural(self.user_add)(users)
+
+# USERDEL METHODS #############################################################
+ def __userdel(self,user,*switches):
+        """Constructs the proper argument sequence for self.__command and returns its result."""
+ if self.user_exists(user):
+ return self.__command("/usr/sbin/userdel",user,*switches)
+ else:
+ return False
+
+ def user_del(self,user,*options):
+ """Deletes a user on the target system(s)."""
+ switches=[]
+ if options:
+ for option in options:
+ if option == 'force':
+ switches.append('-f')
+ elif option == 'remove':
+ switches.append('-r')
+ else:
+ return False
+ return self.__userdel(user,*switches)
+
+ def users_del(self,*users):
+ """Deletes a series of users on the target system(s)."""
+ return self.__plural(self.user_del)(users)
+
+# USERMOD METHODS #############################################################
+ def __usermod(self,user,*switches):
+        """Constructs the proper argument sequence for self.__command and returns its result."""
+ if self.user_exists(user):
+ command = []
+ if switches:
+ command = list(switches)
+ command.append(user)
+ return self.__command("/usr/sbin/usermod",*command)
+ else:
+ return False
+
+ def user_lock(self,user):
+ """Locks a user account on the target system(s)."""
+ return self.__usermod(user,"-L")
+
+ def users_lock(self,*users):
+ """Locks a series of user accounts on the target system(s)."""
+ return self.__plural(self.user_lock)(users)
+
+ def user_set_shell(self,user,shell):
+ """Set a specified user's shell on the target system(s)."""
+ return self.__usermod(user,"-s",shell)
+
+ def users_set_shell(self,shell,*users):
+ """Set a specified list of users' shell on the target system(s)."""
+ return self.__plural(lambda u: self.user_set_shell(u,shell))(users)
+
+ def user_set_home(self,user,home):
+ """Change (but don't move the contents of) a user's home folder on the target system(s)."""
+ return self.__usermod(user,"-d",home)
+
+ def user_set_loginname(self,user,loginname):
+ """Change a user's login name on the target system(s)."""
+ return self.__usermod(user,"-l",loginname)
+
+ def user_set_comment(self,user,comment):
+ """Change the value of a user's GECOS field -- maybe replace this with a field sensitive version?"""
+ return self.__usermod(user,"-c",comment)
+
+ def user_set_expiredate(self,user,expiredate):
+        """Set the expiry date for a specified user on the target system(s)."""
+ return self.__usermod(user,"-e",expiredate)
+
+ def users_set_expiredate(self,expiredate,*users):
+ """Set a specified list of users' expiry date on the target system(s)."""
+ return self.__plural(lambda u: self.user_set_expiredate(u,expiredate))(users)
+
+ def user_set_uid_non_unique(self,user,uid):
+ """Change a user's UID, allowing non-unique UIDs on the target system(s)."""
+ return self.__usermod(user,"-u",uid,"-o")
+
+ def user_set_uid(self,user,uid):
+ """Change a user's UID on the target system(s)."""
+ return self.__usermod(user,"-u",uid)
+
+ def user_set_inactive(self,user,inactive):
+ """Set the inactivity timer on a user on the target system(s)."""
+ return self.__usermod(user,"-f",inactive)
+
+ def users_set_inactive(self,inactive,*users):
+ """Set the inactivity timer on a series of users on the target system(s)."""
+ return self.__plural(lambda u: self.user_set_inactive(u,inactive))(users)
+
+ def user_set_gid(self,user,gid):
+ """Change a users primary group by GID on the target system(s)."""
+ if self.gid_exists(gid):
+ return self.__usermod(user,"-g",gid)
+ else:
+ return False
+
+ def users_set_gid(self,gid,*users):
+ """Set a specified list of users' primary GID on the target system(s)."""
+ return self.__plural(lambda u: self.user_set_gid(u,gid))(users)
+
+ def user_move_home(self,user,home):
+ """Changes and moves a users home folder on the target system(s)."""
+ return self.__usermod(user,"-d",home,"-m")
+
+ def user_unlock(self,user):
+ """Unlocks a specified user account on the target system(s)."""
+ return self.__usermod(user,"-U")
+
+ def users_unlock(self,*users):
+ """Unlocks a specified list of users' accounts on the target system(s)."""
+ return self.__plural(self.user_unlock)(users)
+
+ def user_add_to_group(self,user,group):
+ """Appends the user to a specified group on the target system(s)."""
+ if self.group_exists(group):
+ return self.__usermod(user,"-aG",group)
+ else:
+ return False
+
+ def users_add_to_group(self,group,*users):
+ """Appends the list of users to a specified group on the target system(s)."""
+ return self.__plural(lambda u: self.user_add_to_group(u,group))(users)
+
+ def user_set_group(self,user,group):
+ """Changes a users primary group on the target system(s)."""
+ if self.group_exists(group):
+ gid = self.group_to_gid(group)
+ return self.__usermod(user,"-g",gid)
+ else:
+ return False
+
+ def users_set_group(self,group,*users):
+ """Changes a series of users' primary group on the target system(s)."""
+ return self.__plural(lambda u: self.user_set_group(u,group))(users)
+
+# PASSWD/CHPASWD METHODS #####################################################
+ def passwd(self,user,passwd):
+ """Changes a user's password on the target system(s)."""
+ if self.user_exists(user):
+ if system("echo "+passwd+" | passwd --stdin "+user):
+ return False
+ else:
+ return True
+ else:
+ return False
+
+# INFORMATIONAL METHODS #######################################################
+# EXISTENCE TEST METHODS
+ def user_exists(self,user):
+ """Checks to see if a given user exists on the target system(s)."""
+ try:
+ if pwd.getpwnam(user):
+ return True
+ except KeyError:
+ return False
+
+ def users_exist(self,*users):
+ """Checks to see if a series of users exists on the target system(s)."""
+ return self.__plural(self.user_exists)(users)
+
+ def uid_exists(self,uid):
+ """Checks to see if a given UID exists on the target system(s)."""
+ try:
+ if pwd.getpwuid(int(uid)):
+ return True
+ except KeyError:
+ return False
+
+ def uids_exist(self,*uids):
+ """Checks to see if a series of UIDs exists on the target system(s)."""
+ return self.__plural(self.uid_exists)(uids)
+
+ def group_exists(self,group):
+ """Checks to see if a given group exists on the target system(s)."""
+ try:
+ if grp.getgrnam(group):
+ return True
+ except KeyError:
+ return False
+
+ def groups_exist(self,*groups):
+ """Checks to see if a series of groups exist on the target system(s)."""
+ return self.__plural(self.group_exists)(groups)
+
+ def gid_exists(self,gid):
+ """Checks to see if a given GID exists on the target system(s)."""
+ try:
+ if grp.getgrgid(int(gid)):
+ return True
+ except KeyError:
+ return False
+
+ def gids_exist(self,*gids):
+ """Checks to see if a series of GIDs exist on the target system(s)."""
+ return self.__plural(self.gid_exists)(gids)
+
+# LISTING METHODS
+ def user_list(self):
+ """Lists all users on the target system(s)."""
+ users = []
+ for user in pwd.getpwall():
+ users.append(user[0])
+ return users
+
+ def users_list(self):
+ """Lists all users on the target system(s)."""
+ return self.user_list()
+
+ def uid_list(self):
+ """Lists all UIDs on the target system(s)."""
+ uids = []
+ for user in pwd.getpwall():
+ if user[2] < 4294967294:
+ uids.append(user[2])
+ return uids
+
+ def uids_list(self):
+ """Lists all UIDs on the target system(s)."""
+ return self.uid_list()
+
+ def group_list(self):
+ """Lists all groups on the target system(s)."""
+ groups = []
+ for group in grp.getgrall():
+ groups.append(group[0])
+ return groups
+
+ def groups_list(self):
+ """Lists all groups on the target system(s)."""
+ return self.group_list()
+
+ def gid_list(self):
+ """Lists all GIDs on the target system(s)."""
+ gids = []
+ for group in grp.getgrall():
+ if group[2] < 4294967294:
+ gids.append(group[2])
+ return gids
+
+ def gids_list(self):
+ """Lists all GIDs on the target system(s)."""
+ return self.gid_list()
+
+# INFO METHODS
+ def user_info(self,user):
+ """Returns user info or false for a specified user on the target system(s)."""
+ try:
+ if pwd.getpwnam(user):
+ info = pwd.getpwnam(user)
+ return list(info) # I'm not sure why this has to be listed but the method fails to work if it isn't.
+ except KeyError:
+ return False
+
+ def users_info(self,*users):
+ """Returns a list of (group info or False) for a series of users on the target system(s)."""
+ return self.__plural(self.user_info)(users)
+
+ def uid_info(self,uid):
+ """Returns user info or false for a specified user (by UID) on the target system(s)."""
+ try:
+ if pwd.getpwuid(uid):
+ info = pwd.getpwuid(int(uid))
+ return list(info)
+ except KeyError:
+ return False
+
+ def uids_info(self,*uids):
+ """Returns a list (group info or False) for a series of users (by UID) on the target system(s)."""
+ return self.__plural(self.uid_info)(uids)
+
+ def group_info(self,group):
+ """Returns group info or false for a specified group on the target system(s)."""
+ try:
+ if grp.getgrnam(group):
+ info = grp.getgrnam(group)
+ return list(info) #for some reason this needs to be list-ed
+ except KeyError:
+ return False
+
+ def groups_info(self,*groups):
+ """Returns a list (group info or False) for a series of groups on the target system(s)."""
+ return self.__plural(self.group_info)(groups)
+
+ def gid_info(self,gid):
+ """Returns group info or false for a specified group (by GID) on the target system(s)."""
+ try:
+ if grp.getgrgid(int(gid)):
+ info = grp.getgrgid(int(gid))
+ return list(info)
+ except KeyError:
+ return False
+
+ def gids_info(self,*gids):
+ """Returns a list (group info or False) for a series of groups (by GID) on the target system(s)."""
+ return self.__plural(self.gid_info)(gids)
+
+# INVENTORY METHODS
+ def user_inventory(self):
+ """Returns user info for all users on the target system(s)."""
+ return pwd.getpwall()
+
+ def users_inventory(self):
+ """Returns user info for all users on the target system(s)."""
+        return self.user_inventory()  # delegate to the singular form; calling users_inventory() here would recurse forever
+
+ def group_inventory(self):
+        """Returns group info for all groups on the target system(s)."""
+ return grp.getgrall()
+
+ def groups_inventory(self):
+        """Returns group info for all groups on the target system(s)."""
+        return self.group_inventory()  # delegate to the singular form to avoid infinite recursion
+
+ def grep(self, word):
+ """
+ Grep some info from user_list and
+ group list
+ """
+ results = {
+ self.user_list:[],
+ self.group_list:[]
+ }
+
+ user_list = self.user_list()
+ group_list = self.group_list()
+
+ results[self.user_list].extend([res for res in user_list if res.lower().find(word)!=-1])
+ results[self.group_list].extend([res for res in group_list if res.lower().find(word)!=-1])
+ return results
+ grep = func_module.findout(grep)
+
+# CONVERSION METHODS
+ def user_to_uid(self,user):
+ """Takes a user name and converts it to the matching UID."""
+ try:
+ username = pwd.getpwnam(user)[2]
+ return username
+ except KeyError:
+ return False
+
+ def users_to_uids(self,*users):
+ """Takes a series of usernames and converts it to a list of matching UIDs."""
+ return self.__plural(self.user_to_uid)(users)
+
+ def uid_to_user(self,uid):
+ """Takes a UID and converts it to the matching user name."""
+ try:
+ user = pwd.getpwuid(int(uid))[0]
+ return user
+ except KeyError:
+ return False
+
+ def uids_to_users(self,*uids):
+ """Takes a series of UIDs and converts it to a list of matching user names."""
+ return self.__plural(self.uid_to_user)(uids)
+
+ def group_to_gid(self,group):
+ """Takes a group name and converts it to the matching GID."""
+ try:
+ groupname = grp.getgrnam(group)[2]
+ return groupname
+ except KeyError:
+ return False
+
+ def groups_to_gids(self,*groups):
+ """Takes a series of group names and converts it to a list of matching GIDs."""
+ return self.__plural(self.group_to_gid)(groups)
+
+ def gid_to_group(self,gid):
+ """Takes a GID and converts it to the matching group name."""
+ try:
+ group = grp.getgrgid(int(gid))[0]
+ return group
+ except KeyError:
+ return False
+
+ def gids_to_groups(self,*gids):
+ """Takes a series of GIDs and converts it to a list of matching group names."""
+        return self.__plural(self.gid_to_group)(gids)  # gid_to_group is the singular helper defined above
+
+######
+
+ def register_method_args(self):
+ password = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'A password.'
+ }
+ cmdopt = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'An option to the command.'
+ }
+ cmdopts = {
+ 'type':'list*',
+ 'optional':False,
+            'description':'A series of options to the command.'
+ }
+ username = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'A username.',
+ }
+ usernames = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of usernames.',
+ }
+ group = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'A group name.'
+ }
+ groups = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of group names.'
+ }
+ gid = {
+ 'type':'int',
+ 'optional':False,
+ 'description':'A gid.'
+ }
+ gids = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of gids.'
+ }
+ ogid = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'An optional gid.'
+ }
+ ouid = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'An optional uid.'
+ }
+ uid = {
+ 'type':'int',
+ 'optional':False,
+ 'description':'A uid.'
+ }
+ uids = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of uids.'
+ }
+
+ return {
+ #GROUPADD METHODS
+ 'group_add':{
+ 'args':{
+ 'group':group,
+ 'gid':ogid
+ },
+ 'description':"Create a group."
+ },
+
+ 'groups_add':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':"Create series of groups."
+ },
+
+ 'group_add_non_unique':{
+ 'args':{
+ 'group':group,
+ 'gid':ogid
+ },
+ 'description':"Create a group."
+ },
+
+ #GROUPDEL METHODS
+ 'group_del':{
+ 'args':{
+ 'group':group
+ },
+ 'description':"Delete a group."
+ },
+
+ 'groups_del':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':"Delete a series of groups."
+ },
+
+ #GROUPMOD METHODS
+ 'group_set_gid_non_unique':{
+ 'args':{
+ 'group':group,
+ 'gid':gid
+ },
+                'description':"Allows a group's gid to be non-unique."
+ },
+
+ 'group_set_gid':{
+ 'args':{
+ 'group':group,
+ 'gid':gid
+ },
+ 'description':"Set a group's gid."
+ },
+
+ 'group_set_groupname':{
+ 'args':{
+ 'group':group,
+ 'groupname':group
+ },
+ 'description':"Set a group's groupname."
+ },
+
+ #USERADD METHODS
+ 'user_add':{
+ 'args':{
+ 'user':username
+ },
+ 'description':"Create a user."
+ },
+
+ 'users_add':{
+ 'args':{
+ 'users':usernames
+ },
+ 'description':"Create series of users."
+ },
+
+ #USERDEL METHODS
+ 'user_del':{
+ 'args':{
+ 'user':username,
+ 'options':cmdopts,
+ },
+ 'description':"Delete a user's account."
+ },
+
+ 'users_del':{
+ 'args':{
+ 'users':usernames,
+ },
+ 'description':"Delete a series of users' accounts."
+ },
+
+ #USERMOD METHODS
+ 'user_lock':{
+ 'args':{
+ 'user':username,
+ },
+ 'description':"Lock a user's account."
+ },
+
+ 'users_lock':{
+ 'args':{
+ 'users':usernames,
+ },
+ 'description':"Lock a series of users' accounts."
+ },
+
+ 'user_set_shell':{
+ 'args':{
+ 'user':username,
+ 'shell':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A path to a shell."
+ }
+ },
+ 'description':"Set a user's shell."
+ },
+
+ 'users_set_shell':{
+ 'args':{
+ 'users':usernames,
+ 'shell':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A path to a shell."
+ }
+ },
+ 'description':"Set a series of users' shell."
+ },
+
+ 'user_set_home':{
+ 'args':{
+ 'user':username,
+ 'home':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A directory."
+ }
+ },
+ 'description':"Set a user's home folder."
+ },
+
+ 'user_set_loginname':{
+ 'args':{
+ 'user':username,
+ 'loginname':username
+ },
+                'description':"Set a user's login name."
+ },
+
+ 'user_set_comment':{
+ 'args':{
+ 'user':username,
+ 'comment':cmdopt
+ },
+ 'description':"Set a user's GECOS field."
+ },
+
+ 'user_set_expiredate':{
+ 'args':{
+ 'user':username,
+ 'expiredate':cmdopt
+ },
+ 'description':"Set a user's account's expiry date."
+ },
+
+ 'users_set_expiredate':{
+ 'args':{
+ 'expiredate':cmdopt,
+ 'users':usernames
+ },
+ 'description':"Set a series of users' accounts' expiry date."
+ },
+
+ 'user_set_uid_non_unique':{
+ 'args':{
+ 'user':username,
+ 'uid':uid
+ },
+ 'description':"Set a user's uid."
+ },
+
+ 'user_set_uid':{
+ 'args':{
+ 'user':username,
+ 'uid':uid
+ },
+ 'description':"Set a user's uid."
+ },
+
+ 'user_set_inactive':{
+ 'args':{
+ 'user':username,
+ 'inactive':cmdopt
+ },
+ 'description':"Set a user's inactivity timer."
+ },
+
+ 'users_set_inactive':{
+ 'args':{
+ 'inactive':cmdopt,
+ 'users':usernames
+ },
+ 'description':"Set a series of users' inactivity timer."
+ },
+
+ 'user_set_gid':{
+ 'args':{
+ 'user':username,
+ 'gid':gid
+ },
+ 'description':"Set a user's gid."
+ },
+
+ 'users_set_gid':{
+ 'args':{
+ 'gid':gid,
+ 'users':usernames
+ },
+ 'description':"Set a series of users' gids."
+ },
+
+ 'user_move_home':{
+ 'args':{
+ 'user':username,
+ 'home':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A directory."
+ }
+ },
+ 'description':"Set a user's home folder and move the contents to the new folder."
+ },
+
+ 'user_unlock':{
+ 'args':{
+ 'user':username,
+ },
+ 'description':"Unlock a user's account."
+ },
+
+ 'users_unlock':{
+ 'args':{
+ 'users':usernames,
+ },
+ 'description':"Unlock a series of users' account."
+ },
+
+ 'user_add_to_group':{
+ 'args':{
+ 'group':group,
+ 'user':username,
+ },
+ 'description':"Append a user to a group."
+ },
+
+ 'users_add_to_group':{
+ 'args':{
+ 'group':group,
+ 'users':usernames,
+ },
+ 'description':"Append a series of users to a group."
+ },
+
+ 'user_set_group':{
+ 'args':{
+ 'group':group,
+ 'user':username,
+ },
+ 'description':"Set a user's group."
+ },
+
+ 'users_set_group':{
+ 'args':{
+ 'group':group,
+ 'users':usernames,
+ },
+ 'description':"Set a series of users' group."
+ },
+
+ #PASSWD METHODS
+ 'passwd':{
+ 'args':{
+ 'user':username,
+ 'passwd':password
+ },
+ 'description':"Change a user's password."
+ },
+
+            #EXISTENCE TEST METHODS
+ 'uid_exists':{
+ 'args':{
+ 'uid':uid
+ },
+                'description':'Test the existence of a uid.'
+ },
+
+ 'uids_exist':{
+ 'args':{
+ 'uids':uids
+ },
+                'description':'Test the existence of a series of uids.'
+ },
+
+ 'gid_exists':{
+ 'args':{
+ 'gid':gid
+ },
+                'description':'Test the existence of a gid.'
+ },
+
+ 'gids_exist':{
+ 'args':{
+ 'gids':gids
+ },
+                'description':'Test the existence of a series of gids.'
+ },
+
+ 'user_exists':{
+ 'args':{
+ 'user':username
+ },
+                'description':'Test the existence of a user.'
+ },
+
+ 'users_exist':{
+ 'args':{
+ 'users':usernames
+ },
+                'description':'Test the existence of a series of users.'
+ },
+
+ 'group_exists':{
+ 'args':{
+ 'group':group
+ },
+                'description':'Test the existence of a group.'
+ },
+
+ 'groups_exist':{
+ 'args':{
+ 'groups':groups
+ },
+                'description':'Test the existence of a series of groups.'
+ },
+
+ #LISTING METHODS
+ 'uid_list':{
+ 'args':{},
+ 'description':'Get a list of all uids.'
+ },
+
+ 'uids_list':{
+ 'args':{},
+ 'description':'Get a list of all uids.'
+ },
+
+ 'gid_list':{
+ 'args':{},
+ 'description':'Get a list of all gids.'
+ },
+
+ 'gids_list':{
+ 'args':{},
+                'description':'Get a list of all gids.'
+ },
+
+ 'user_list':{
+ 'args':{},
+ 'description':'Get a list of all users.'
+ },
+
+ 'users_list':{
+ 'args':{},
+ 'description':'Get a list of all users.'
+ },
+
+ 'group_list':{
+ 'args':{},
+ 'description':'Get a list of all groups.'
+ },
+
+ 'groups_list':{
+ 'args':{},
+ 'description':'Get a list of all groups.'
+ },
+
+ #INFO METHODS
+ 'user_info':{
+ 'args':{
+ 'user':username
+ },
+ 'description':'Fetch info for a specified user.'
+ },
+
+ 'users_info':{
+ 'args':{
+ 'users':usernames
+ },
+ 'description':'Fetch info for a specified series of users.'
+ },
+
+ 'uid_info':{
+ 'args':{
+ 'uid':uid
+ },
+ 'description':'Fetch info for a specified uid.'
+ },
+
+ 'uids_info':{
+ 'args':{
+ 'uids':uids
+ },
+ 'description':'Fetch info for a specified series of uids.'
+ },
+
+ 'group_info':{
+ 'args':{
+ 'group':group
+ },
+ 'description':'Fetch info for a specified group.'
+ },
+
+ 'groups_info':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':'Fetch info for a specified series of groups.'
+ },
+
+ 'gid_info':{
+ 'args':{
+ 'gid':gid
+ },
+ 'description':'Fetch info for a specified gid.'
+ },
+
+ 'gids_info':{
+ 'args':{
+ 'gids':gids
+ },
+ 'description':'Fetch info for a specified series of gids.'
+ },
+
+ #INVENTORY METHODS
+ 'user_inventory':{
+ 'args':{},
+ 'description':'Get user info for all users.'
+ },
+
+ 'users_inventory':{
+ 'args':{},
+ 'description':'Get user info for all users.'
+ },
+
+ 'group_inventory':{
+ 'args':{},
+ 'description':'Get group info for all groups.'
+ },
+
+ 'groups_inventory':{
+ 'args':{},
+ 'description':'Get group info for all groups.'
+ },
+
+ #CONVERSION METHODS
+ 'user_to_uid':{
+ 'args':{
+ 'user':username
+ },
+ 'description':'Convert a username to a matching uid.'
+ },
+
+ 'users_to_uids':{
+ 'args':{
+ 'users':usernames
+ },
+ 'description':'Convert a series of usernames to a list of matching uids.'
+ },
+
+ 'uid_to_user':{
+ 'args':{
+ 'uid':uid
+ },
+ 'description':'Convert a uid to a username.'
+ },
+
+ 'uids_to_users':{
+ 'args':{
+ 'uids':uids
+ },
+ 'description':'Convert a series of uids to a list of matching usernames.'
+ },
+
+ 'group_to_gid':{
+ 'args':{
+ 'group':group
+ },
+ 'description':'Convert a group to a matching gid.'
+ },
+
+ 'groups_to_gids':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':'Converts a series of groups to a list of matching gids.'
+ },
+
+ 'gid_to_group':{
+ 'args':{
+ 'gid':gid
+ },
+                'description':'Converts a gid to the matching group name.'
+ },
+
+ 'gids_to_groups':{
+ 'args':{
+ 'gids':gids
+ },
+ 'description':'Converts a series of gids to a list of matching groupnames.'
+ }
+
+ }
+
+
+ def ree(self):
+ password = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'A password.'
+ }
+ cmdopt = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'An option to the command.'
+ }
+ cmdopts = {
+ 'type':'list*',
+ 'optional':False,
+            'description':'A series of options to the command.'
+ }
+ username = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'A username.',
+ }
+ usernames = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of usernames.',
+ }
+ group = {
+ 'type':'string',
+ 'optional':False,
+ 'description':'A group name.'
+ }
+ groups = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of group names.'
+ }
+ gid = {
+ 'type':'int',
+ 'optional':False,
+ 'description':'A gid.'
+ }
+ gids = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of gids.'
+ }
+ ogid = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'An optional gid.'
+ }
+ ouid = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'An optional uid.'
+ }
+ uid = {
+ 'type':'int',
+ 'optional':False,
+ 'description':'A uid.'
+ }
+ uids = {
+ 'type':'list*',
+ 'optional':False,
+ 'description':'A series of uids.'
+ }
+
+ return {
+ #GROUPADD METHODS
+ 'group_add':{
+ 'args':{
+ 'group':group,
+ 'gid':ogid
+ },
+ 'description':"Create a group."
+ },
+
+ 'groups_add':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':"Create series of groups."
+ },
+
+ 'group_add_non_unique':{
+ 'args':{
+ 'group':group,
+ 'gid':ogid
+ },
+ 'description':"Create a group."
+ },
+
+ #GROUPDEL METHODS
+ 'group_del':{
+ 'args':{
+ 'group':group
+ },
+ 'description':"Delete a group."
+ },
+
+ 'groups_del':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':"Delete a series of groups."
+ },
+
+ #GROUPMOD METHODS
+ 'group_set_gid_non_unique':{
+ 'args':{
+ 'group':group,
+ 'gid':gid
+ },
+                'description':"Allows a group's gid to be non-unique."
+ },
+
+ 'group_set_gid':{
+ 'args':{
+ 'group':group,
+ 'gid':gid
+ },
+ 'description':"Set a group's gid."
+ },
+
+ 'group_set_groupname':{
+ 'args':{
+ 'group':group,
+ 'groupname':group
+ },
+ 'description':"Set a group's groupname."
+ },
+
+ #USERADD METHODS
+ 'user_add':{
+ 'args':{
+ 'user':username
+ },
+ 'description':"Create a user."
+ },
+
+ 'users_add':{
+ 'args':{
+ 'users':usernames
+ },
+ 'description':"Create series of users."
+ },
+
+ #USERDEL METHODS
+ 'user_del':{
+ 'args':{
+ 'user':username,
+ 'options':cmdopts,
+ },
+ 'description':"Delete a user's account."
+ },
+
+ 'users_del':{
+ 'args':{
+ 'users':usernames,
+ 'options':cmdopts,
+ },
+ 'description':"Delete a series of users' accounts."
+ },
+
+ #USERMOD METHODS
+ 'user_lock':{
+ 'args':{
+ 'user':username,
+ },
+ 'description':"Lock a user's account."
+ },
+
+ 'users_lock':{
+ 'args':{
+ 'users':usernames,
+ },
+ 'description':"Lock a series of users' accounts."
+ },
+
+ 'user_set_shell':{
+ 'args':{
+ 'user':username,
+ 'shell':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A path to a shell."
+ }
+ },
+ 'description':"Set a user's shell."
+ },
+
+ 'users_set_shell':{
+ 'args':{
+ 'users':usernames,
+ 'shell':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A path to a shell."
+ }
+ },
+ 'description':"Set a series of users' shell."
+ },
+
+ 'user_set_home':{
+ 'args':{
+ 'user':username,
+ 'home':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A directory."
+ }
+ },
+ 'description':"Set a user's home folder."
+ },
+
+ 'user_set_loginname':{
+ 'args':{
+ 'user':username,
+ 'loginname':username
+ },
+                'description':"Set a user's login name."
+ },
+
+ 'user_set_comment':{
+ 'args':{
+ 'user':username,
+ 'comment':cmdopt
+ },
+ 'description':"Set a user's GECOS field."
+ },
+
+ 'user_set_expiredate':{
+ 'args':{
+ 'user':username,
+ 'expiredate':cmdopt
+ },
+ 'description':"Set a user's account's expiry date."
+ },
+
+ 'users_set_expiredate':{
+ 'args':{
+ 'expiredate':cmdopt,
+ 'users':usernames
+ },
+ 'description':"Set a series of users' accounts' expiry date."
+ },
+
+ 'user_set_uid_non_unique':{
+ 'args':{
+ 'user':username
+ },
+ 'description':"Set a user's uid."
+ },
+
+ 'user_set_uid':{
+ 'args':{
+ 'user':username,
+ 'uid':uid
+ },
+ 'description':"Set a user's uid."
+ },
+
+ 'user_set_inactive':{
+ 'args':{
+ 'user':username,
+ 'inactive':cmdopt
+ },
+ 'description':"Set a user's inactivity timer."
+ },
+
+ 'users_set_inactive':{
+ 'args':{
+ 'inactive':cmdopt,
+ 'users':usernames
+ },
+ 'description':"Set a series of users' inactivity timer."
+ },
+
+ 'user_set_gid':{
+ 'args':{
+ 'user':username,
+ 'gid':gid
+ },
+ 'description':"Set a user's gid."
+ },
+
+ 'users_set_gid':{
+ 'args':{
+ 'gid':gid,
+ 'users':usernames
+ },
+ 'description':"Set a series of users' gids."
+ },
+
+ 'user_move_home':{
+ 'args':{
+ 'user':username,
+ 'home':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"A directory."
+ }
+ },
+ 'description':"Set a user's home folder and move the contents to the new folder."
+ },
+
+ 'user_unlock':{
+ 'args':{
+ 'user':username,
+ },
+ 'description':"Unlock a user's account."
+ },
+
+ 'users_unlock':{
+ 'args':{
+ 'users':usernames,
+ },
+ 'description':"Unlock a series of users' account."
+ },
+
+ 'user_add_to_group':{
+ 'args':{
+ 'group':group,
+ 'user':username,
+ },
+ 'description':"Append a user to a group."
+ },
+
+ 'users_add_to_group':{
+ 'args':{
+ 'group':group,
+ 'users':usernames,
+ },
+ 'description':"Append a series of users to a group."
+ },
+
+ 'user_set_group':{
+ 'args':{
+ 'group':group,
+ 'user':username,
+ },
+ 'description':"Set a user's group."
+ },
+
+ 'users_set_group':{
+ 'args':{
+ 'group':group,
+ 'users':usernames,
+ },
+ 'description':"Set a series of users' group."
+ },
+
+            #EXISTENCE TEST METHODS
+ 'uid_exists':{
+ 'args':{
+ 'uid':uid
+ },
+                'description':'Test the existence of a uid.'
+ },
+
+ 'uids_exist':{
+ 'args':{
+ 'uids':uids
+ },
+                'description':'Test the existence of a series of uids.'
+ },
+
+ 'gid_exists':{
+ 'args':{
+ 'gid':gid
+ },
+                'description':'Test the existence of a gid.'
+ },
+
+ 'gids_exist':{
+ 'args':{
+ 'gids':gids
+ },
+                'description':'Test the existence of a series of gids.'
+ },
+
+ 'user_exists':{
+ 'args':{
+ 'user':username
+ },
+                'description':'Test the existence of a user.'
+ },
+
+ 'users_exist':{
+ 'args':{
+ 'users':usernames
+ },
+                'description':'Test the existence of a series of users.'
+ },
+
+ 'group_exists':{
+ 'args':{
+ 'group':group
+ },
+                'description':'Test the existence of a group.'
+ },
+
+ 'groups_exist':{
+ 'args':{
+ 'groups':groups
+ },
+                'description':'Test the existence of a series of groups.'
+ },
+
+ #LISTING METHODS
+ 'uid_list':{
+ 'args':{},
+ 'description':'Get a list of all uids.'
+ },
+
+ 'uids_list':{
+ 'args':{},
+ 'description':'Get a list of all uids.'
+ },
+
+ 'gid_list':{
+ 'args':{},
+ 'description':'Get a list of all gids.'
+ },
+
+ 'gids_list':{
+ 'args':{},
+                'description':'Get a list of all gids.'
+ },
+
+ 'user_list':{
+ 'args':{},
+ 'description':'Get a list of all users.'
+ },
+
+ 'users_list':{
+ 'args':{},
+ 'description':'Get a list of all users.'
+ },
+
+ 'group_list':{
+ 'args':{},
+ 'description':'Get a list of all groups.'
+ },
+
+ 'groups_list':{
+ 'args':{},
+ 'description':'Get a list of all groups.'
+ },
+
+ #INFO METHODS
+ 'user_info':{
+ 'args':{
+ 'user':username
+ },
+ 'description':'Fetch info for a specified user.'
+ },
+
+ 'users_info':{
+ 'args':{
+ 'users':usernames
+ },
+ 'description':'Fetch info for a specified series of users.'
+ },
+
+ 'uid_info':{
+ 'args':{
+ 'uid':uid
+ },
+ 'description':'Fetch info for a specified uid.'
+ },
+
+ 'uids_info':{
+ 'args':{
+ 'uids':uids
+ },
+ 'description':'Fetch info for a specified series of uids.'
+ },
+
+ 'group_info':{
+ 'args':{
+ 'group':group
+ },
+ 'description':'Fetch info for a specified group.'
+ },
+
+ 'groups_info':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':'Fetch info for a specified series of groups.'
+ },
+
+ 'gid_info':{
+ 'args':{
+ 'gid':gid
+ },
+ 'description':'Fetch info for a specified gid.'
+ },
+
+ 'gids_info':{
+ 'args':{
+ 'gids':gids
+ },
+ 'description':'Fetch info for a specified series of gids.'
+ },
+
+ #INVENTORY METHODS
+ 'user_inventory':{
+ 'args':{},
+ 'description':'Get user info for all users.'
+ },
+
+ 'users_inventory':{
+ 'args':{},
+ 'description':'Get user info for all users.'
+ },
+
+ 'group_inventory':{
+ 'args':{},
+ 'description':'Get group info for all groups.'
+ },
+
+ 'groups_inventory':{
+ 'args':{},
+ 'description':'Get group info for all groups.'
+ },
+
+ #CONVERSION METHODS
+ 'user_to_uid':{
+ 'args':{
+ 'user':username
+ },
+ 'description':'Convert a username to a matching uid.'
+ },
+
+ 'users_to_uids':{
+ 'args':{
+ 'users':usernames
+ },
+ 'description':'Convert a series of usernames to a list of matching uids.'
+ },
+
+ 'uid_to_user':{
+ 'args':{
+ 'uid':uid
+ },
+ 'description':'Convert a uid to a username.'
+ },
+
+ 'uids_to_users':{
+ 'args':{
+ 'uids':uids
+ },
+ 'description':'Convert a series of uids to a list of matching usernames.'
+ },
+
+ 'group_to_gid':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':'Convert a group to a matching gid.'
+ },
+
+ 'groups_to_gids':{
+ 'args':{
+ 'groups':groups
+ },
+ 'description':'Converts a series of groups to a list of matching gids.'
+ },
+
+ 'gid_to_group':{
+ 'args':{
+ 'gid':gid
+ },
+                'description':'Converts a gid to the matching group name.'
+ },
+
+ 'gids_to_groups':{
+ 'args':{
+ 'gids':gids
+ },
+ 'description':'Converts a series of gids to a list of matching groupnames.'
+ }
+
+ }
|
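The users module shells out to useradd, usermod, groupadd and friends, and answers most queries straight from the pwd and grp databases. A sketch of a typical round trip; the minion, user and group names are invented for illustration:

import func.overlord.client as fc

overlord = fc.Overlord("minion1.example.org")        # placeholder minion
overlord.users.group_add("deploy")                   # runs /usr/sbin/groupadd deploy
overlord.users.user_add("alice")                     # runs /usr/sbin/useradd alice
overlord.users.user_add_to_group("alice", "deploy")  # runs usermod -aG deploy alice
overlord.users.user_set_shell("alice", "/bin/bash")  # runs usermod -s /bin/bash alice
print overlord.users.user_info("alice")              # pwd entry as a list, or False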
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/virt.py
^
|
@@ -17,6 +17,10 @@
# service control module. API docs on how
# to use this to come.
+# xml modules
+import StringIO
+from xml.dom import minidom
+
# other modules
import os
import sub_process
@@ -68,7 +72,7 @@
# this block of code borrowed from virt-manager:
# get working domain's name
- ids = conn.listDomainsID();
+ ids = conn.listDomainsID()
for id in ids:
vm = conn.lookupByID(id)
vms.append(vm)
@@ -126,6 +130,26 @@
def get_type(self):
return self.conn.getType()
+ def set_vcpus(self, vmid, num):
+ vm = self.conn.lookupByName(vmid)
+ return vm.setVcpus(num)
+
+ def get_maxVcpus(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.maxVcpus()
+
+ def set_maxMemory(self, vmid, amount):
+ vm = self.conn.lookupByName(vmid)
+ return vm.setMaxMemory(amount)
+
+ def get_maxMemory(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.maxMemory()
+
+ def set_memory(self, vmid, amount):
+ vm = self.conn.lookupByName(vmid)
+ return vm.setMemory(amount)
+
class Virt(func_module.FuncModule):
@@ -189,35 +213,35 @@
return results
def virttype(self):
- return self.__get_conn().get_type()
+ return self.__get_conn().get_type()
def autostart(self, vm):
- self.conn = self.__get_conn()
- if self.conn.get_type() == "Xen":
- autostart_args = [
- "/bin/ln",
- "-s",
- "/etc/xen/%s" % vm,
- "/etc/xen/auto"
- ]
+ self.conn = self.__get_conn()
+ if self.conn.get_type() == "Xen":
+ autostart_args = [
+ "/bin/ln",
+ "-s",
+ "/etc/xen/%s" % vm,
+ "/etc/xen/auto"
+ ]
else:
# When using KVM, we need to make sure the autostart
# directory exists
- mkdir_args = [
- "/bin/mkdir",
- "-p",
- "/etc/libvirt/qemu/autostart"
- ]
+ mkdir_args = [
+ "/bin/mkdir",
+ "-p",
+ "/etc/libvirt/qemu/autostart"
+ ]
sub_process.call(mkdir_args,shell=False,close_fds=True)
# We aren't using virsh autostart because we want
# the command to work even when the VM isn't running
- autostart_args = [
- "/bin/ln",
- "-s",
- "/etc/libvirt/qemu/%s.xml" % vm,
- "/etc/libvirt/qemu/autostart/%s.xml" % vm
- ]
+ autostart_args = [
+ "/bin/ln",
+ "-s",
+ "/etc/libvirt/qemu/%s.xml" % vm,
+ "/etc/libvirt/qemu/autostart/%s.xml" % vm
+ ]
return sub_process.call(autostart_args,shell=False,close_fds=True)
@@ -355,3 +379,128 @@
self.__get_conn()
return self.conn.get_status(vmid)
+
+
+ def get_xml(self, vmid):
+ """
+        Receive a VM id as input.
+        Return the XML domain description obtained from a libvirt call.
+ """
+ conn = libvirt.openReadOnly(None)
+ if conn == None:
+ return (-1,'Failed to open connection to the hypervisor')
+ try:
+ domV = conn.lookupByName(vmid)
+ except:
+ return (-1,'Failed to find the main domain')
+ return domV.XMLDesc(0)
+
+
+ def get_graphics(self,vmid,xml='None'):
+ """
+        Receive a VM id as input.
+        Read machine information from the xml config and
+        return a key/val map containing only the graphics properties.
+ """
+ out = {'autoport': 'None', 'keymap': 'None', 'type': 'vnc', 'port': 'None', 'listen': 'None'}
+ if(xml=='None'):
+ xml = self.get_xml(vmid)
+ else:
+ xml = "<domain>\n"+xml+"\n</domain>"
+ ssock = StringIO.StringIO(xml)
+ doc = minidom.parse(ssock)
+ for node in doc.getElementsByTagName("domain"):
+ graphics = node.getAttribute("devices")
+ L = node.getElementsByTagName("graphics")
+ for node2 in L:
+ for k in node2.attributes.keys():
+ out[k] = node2.getAttribute(k)
+ return out
+
+
+ def set_graphics(self,vmid,xml):
+ """
+        Receive a VM id and a piece of xml as input.
+        Set the vnc address and parameters of the vm in its config file.
+        Return 0 if the config has been written correctly.
+ """
+ try:
+ conn = libvirt.openReadOnly(None)
+ tmp = conn.getType()
+ except:
+ return (-1,'Failed to open connection to the hypervisor')
+ strxml = self.get_graphics(vmid,xml)
+ str = "vfb = [ \"vncunused=1, "
+
+ for el in strxml:
+ if(strxml[el] != 'None'):
+ if(el == 'port'):
+ str = "%s%s=\'%s\', " % (str,"vncdisplay",(int(strxml[el])-5900))
+ else:
+ str = "%s%s=\'%s\', " % (str,el,strxml[el])
+ str = "%s\" ]" % str.rstrip(' ').rstrip(',')
+
+ if(tmp == "Xen"):
+ if os.path.exists("/etc/xen/%s" % vmid):
+ return os.system("sed -i 's/^vfb.*/%s/g' /etc/xen/%s" % (str,vmid))
+ else:
+ return (-1,'Config file /etc/xen/%s not found' % vmid)
+ else:
+ if os.path.exists("/etc/libvirt/qemu/%s.xml" % vmid):
+ xml = self.get_xml(vmid)
+ ssock = StringIO.StringIO(xml)
+ doc = minidom.parse(ssock)
+ for node in doc.getElementsByTagName("domain"):
+ graphics = node.getAttribute("devices")
+ L = node.getElementsByTagName("graphics")
+ for node2 in L:
+ for el in strxml:
+ if(strxml[el] != 'None'):
+ node2.setAttribute(el,strxml[el])
+
+ output_xml = open("/etc/libvirt/qemu/%s.xml" % vmid, 'w')
+ output_xml.write(node.toxml())
+ return 0
+ else:
+ return (-1,'Config file /etc/libvirt/qemu/%s.xml not found' % vmid)
+ return (-2,'Unmatched Condition in set_graphics method')
+
+ def set_vcpus(self, vmid, num):
+ """
+ Sets the number of VCPUs on a guest
+ """
+
+ self.__get_conn()
+        return self.conn.set_vcpus(vmid, num)
+
+ def get_maxVcpus(self, vmid):
+ """
+ Gets the max number of VCPUs on a guest
+ """
+
+ self.__get_conn()
+        return self.conn.get_maxVcpus(vmid)
+
+ def set_maxMemory(self, vmid, amount):
+ """
+ Sets the max memory on a guest
+ """
+
+ self.__get_conn()
+ return self.conn.set_maxMemory(vmid, amount)
+
+ def get_maxMemory(self, vmid):
+ """
+ Gets the max memory on a guest
+ """
+
+ self.__get_conn()
+        return self.conn.get_maxMemory(vmid)
+
+ def set_memory(self, vmid, amount):
+ """
+ Sets the memory on a guest
+ """
+
+ self.__get_conn()
+ return self.conn.set_memory(vmid, amount)
|
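The virt additions expose per-domain tuning (vcpus, memory) plus XML and VNC graphics introspection on top of libvirt. A hedged sketch; the host name, guest name and memory figure are placeholders, and libvirt takes the memory amount in kilobytes:

import func.overlord.client as fc

overlord = fc.Overlord("virthost1.example.org")      # placeholder virtualisation host
guest = "guest01"                                    # placeholder libvirt domain name
print overlord.virt.get_xml(guest)                   # raw libvirt XML for the domain
print overlord.virt.get_graphics(guest)              # {'type': 'vnc', 'port': ..., 'listen': ..., ...}
overlord.virt.set_memory(guest, 524288)              # 512 MB expressed in kilobytes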
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/vlan.py
^
|
@@ -1,4 +1,3 @@
-#!/usr/bin/python
#
# Copyright 2008, Stone-IT
# Jasper Capel <capel@stone-it.com>
@@ -18,27 +17,40 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
+"""
+Func module for VLAN management
+"""
+
+__author__ = "Jasper Capel <capel@stone-it.com>"
+__version__ = "0.0.3"
+__api_version__ = "0.0.2"
+
import func_module
import os, re
+from certmaster.config import BaseConfig, Option, ListOption
+
class Vlan(func_module.FuncModule):
- version = "0.0.2"
- api_version = "0.0.2"
+ version = __version__
+ api_version = __api_version__
description = "Func module for VLAN management"
- # A list of VLAN IDs that should be ignored.
- # You can use this if you have VLAN IDs which are reserved for internal
- # use, which should never be touched by func.
- # Use strings here, not integers!
- ignorevlans = [ ]
- vconfig = "/sbin/vconfig"
- ip = "/sbin/ip"
- ifup = "/sbin/ifup"
- ifdown = "/sbin/ifdown"
+ class Config(BaseConfig):
+ # A list of VLAN IDs that should be ignored.
+ # You can use this if you have VLAN IDs which are reserved for internal
+ # use, which should never be touched by func.
+ # Use strings here, not integers!
+ ignorevlans = ListOption()
+ vconfig = Option("/sbin/vconfig")
+ ip = Option("/sbin/ip")
+ ifup = Option("/sbin/ifup")
+ ifdown = Option("/sbin/ifdown")
def list(self):
- # Returns a dictionary, elements look like this:
- # key: interface, value: [id1, id2, id3]
+ """
+ Returns a dictionary, elements look like this:
+ key: interface, value: [id1, id2, id3]
+ """
retlist = {}
@@ -64,8 +76,10 @@
return retlist
def list_permanent(self):
- # Returns a dictionary of permanent VLANs, return format is the same as
- # in the list() method.
+ """
+ Returns a dictionary of permanent VLANs, return format is the same as
+ in the list() method.
+ """
retlist = {}
pattern = re.compile('ifcfg-([a-z0-9]+)\.([0-9]+)')
@@ -85,16 +99,31 @@
def add(self, interface, vlanid):
- # Adds a vlan with vlanid on interface
- if vlanid not in self.ignorevlans:
- exitcode = os.spawnv(os.P_WAIT, self.vconfig, [ self.vconfig, "add", interface, str(vlanid)] )
+ """
+ Adds a vlan to an interface
+
+ Keyword arguments:
+ interface -- interface to add vlan to (string, for example: "eth0")
+ vlanid -- ID of the vlan to add (string, for example: "1100")
+ """
+ if vlanid not in self.options.ignorevlans:
+ exitcode = os.spawnv(os.P_WAIT, self.options.vconfig, [ self.options.vconfig, "add", interface, str(vlanid)] )
else:
exitcode = -1
-
+
return exitcode
def add_permanent(self, interface, vlanid, ipaddr=None, netmask=None, gateway=None):
- # Permanently adds a VLAN by writing to an ifcfg-file
+ """
+ Permanently adds a VLAN by writing to an ifcfg-file
+
+ Keyword arguments:
+ interface -- interface to add vlan to (string, for example: "eth0")
+ vlanid -- ID of the vlan to add (string, for example: "1100")
+ ipaddr -- IP-address for this VLAN (string)
+ netmask -- Netmask for this VLAN (string)
+ gateway -- Gateway for this VLAN (string)
+ """
alreadyup = False
list = self.list()
if interface in list:
@@ -103,7 +132,7 @@
alreadyup = True
device = "%s.%s" % (interface, vlanid)
- if vlanid not in self.ignorevlans:
+ if vlanid not in self.options.ignorevlans:
filename = "/etc/sysconfig/network-scripts/ifcfg-%s" % device
fp = open(filename, "w")
filelines = [ "DEVICE=%s\n" % device, "VLAN=yes\n", "ONBOOT=yes\n" ]
@@ -123,23 +152,37 @@
# Don't run ifup, this will confuse the OS
exitcode = self.up(interface, vlanid)
else:
- exitcode = os.spawnv(os.P_WAIT, self.ifup, [ self.ifup, device ])
+ exitcode = os.spawnv(os.P_WAIT, self.options.ifup, [ self.options.ifup, device ])
else:
exitcode = -1
return exitcode
def delete(self, interface, vlanid):
- # Deletes a vlan with vlanid from interface
+ """
+ Deletes a vlan from an interface.
+
+ Keyword arguments:
+ interface -- Interface to delete vlan from (string, example: "eth0")
+ vlanid -- Vlan ID to remove (string, example: "1100")
+ """
vintfname = interface + "." + str(vlanid)
- if vlanid not in self.ignorevlans:
- exitcode = os.spawnv(os.P_WAIT, self.vconfig, [ self.vconfig, "rem", vintfname] )
+ if vlanid not in self.options.ignorevlans:
+ exitcode = os.spawnv(os.P_WAIT, self.options.vconfig, [ self.options.vconfig, "rem", vintfname] )
else:
exitcode = -1
return exitcode
def delete_permanent(self, interface, vlanid):
- if vlanid not in self.ignorevlans:
+ """
+ Permanently removes a vlan from an interface. This is useful when the vlan is configured through an ifcfg-file.
+
+ Keyword arguments:
+ interface -- interface to delete vlan from (string, example: "eth0")
+ vlanid -- Vlan ID to remove (string, example: "1100")
+ """
+
+ if vlanid not in self.options.ignorevlans:
device = "%s.%s" % (interface, vlanid)
filename = "/etc/sysconfig/network-scripts/ifcfg-%s" % device
self.down(interface, vlanid)
@@ -152,29 +195,46 @@
return exitcode
def up(self, interface, vlanid):
- # Marks a vlan interface as up
+ """
+ Marks a vlan interface as up
+
+ Keyword arguments:
+ interface -- interface this vlan resides on (string, example: "eth0")
+ vlanid -- ID for this vlan (string, example: "1100")
+ """
+
vintfname = interface + "." + str(vlanid)
- if vlanid not in self.ignorevlans:
- exitcode = os.spawnv(os.P_WAIT, self.ip, [ self.ip, "link", "set", vintfname, "up" ])
+ if vlanid not in self.options.ignorevlans:
+ exitcode = os.spawnv(os.P_WAIT, self.options.ip, [ self.options.ip, "link", "set", vintfname, "up" ])
else:
exitcode = -1
return exitcode
def down(self, interface, vlanid):
- # Marks a vlan interface as down
+ """
+ Marks a vlan interface as down
+
+ Keyword arguments:
+ interface -- interface this vlan resides on (string, example: "eth0")
+ vlanid -- ID for this vlan (string, example: "1100")
+ """
vintfname = interface + "." + str(vlanid)
- if vlanid not in self.ignorevlans:
- exitcode = os.spawnv(os.P_WAIT, self.ip, [ self.ip, "link", "set", vintfname, "down" ])
+ if vlanid not in self.options.ignorevlans:
+ exitcode = os.spawnv(os.P_WAIT, self.options.ip, [ self.options.ip, "link", "set", vintfname, "down" ])
else:
exitcode = -1
return exitcode
def make_it_so(self, configuration):
- # Applies the supplied configuration to the system.
- # Configuration is a dictionary, elements should look like this:
- # key: interface, value: [id1, id2, id3]
+ """
+ Applies the supplied configuration to the system.
+
+ Keyword arguments:
+ configuration -- dictionary, elements should look like this: key: interface, value: [id1, id2, id3]
+ """
+
currentconfig = self.list()
newconfig = {}
@@ -216,10 +276,12 @@
# Todo: Compare the current configuration to the supplied configuration
return self.list()
-
+
def write(self):
- # Permantly applies configuration obtained through the list() method to
- # the system.
+ """
+ Permanently applies the configuration obtained through the list() method to the system.
+ """
+
currentconfig = self.list_permanent()
newconfig = self.list()
@@ -239,7 +301,7 @@
if interface not in currentconfig:
for vlan in vlans:
self.add_permanent(interface, vlan)
-
+
else:
for vlan in vlans:
if vlan not in currentconfig[interface]:
|
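Note on the vlan.py changes above: the hard-coded tool paths and the ignorevlans list now come from a certmaster BaseConfig (self.options.*), so they can be overridden per minion instead of by editing the module. Calling the module from an overlord is unchanged; a hedged example, where the minion name and VLAN values are placeholders:

    # Assumes a reachable minion named minion1.example.org with the vlan module loaded.
    import func.overlord.client as fc

    overlord = fc.Overlord("minion1.example.org")
    print overlord.vlan.list()               # per-host dict of interface -> [vlan ids]
    print overlord.vlan.add("eth0", "1100")  # vconfig exit code, -1 if the ID is in ignorevlans
    print overlord.vlan.add_permanent("eth0", "1100", "192.168.10.2", "255.255.255.0")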
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/modules/yumcmd.py
^
|
@@ -1,7 +1,8 @@
-# Copyright 2007, Red Hat, Inc
+# Copyright 2010, Red Hat, Inc
# James Bowes <jbowes@redhat.com>
# Alex Wood <awood@redhat.com>
-#
+# Seth Vidal <skvidal@fedoraproject.org>
+
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
@@ -9,9 +10,15 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-import func_module
+# TODOS:
+# - config_dict handling
+# multiple commands in a single call - multiple() == yum shell
+# - permanent config changes
+# - better _makeresults() that doesn't make me kinda hurl and makes the output more sensible
-import yum
+
+import func_module
+from codes import FuncException
# XXX Use internal yum callback or write a useful one.
class DummyCallback(object):
@@ -19,30 +26,109 @@
def event(self, state, data=None):
pass
+def _makeresults(tsInfo):
+ results = ''
+ for pkg in tsInfo:
+ # FIXME obviously much more should happen here :)
+ if pkg.ts_state:
+ results += '%s\n' % pkg
+
+ return results
+
+def _singleAction(action, items=[], config_dict={}, **kwargs):
+ #FIXME - config_dict needs to do the equiv of --setopt in the yumcli
+ import yum
+ ayum = yum.YumBase()
+ ayum.doGenericSetup()
+ ayum.doRepoSetup()
+ if type(items) == type([]):
+ pkglist = []
+ for p in items:
+ pkglist.extend(p.split(' '))
+ else:
+ if items:
+ pkglist = items.split(' ')
+ else:
+ pkglist = []
+
+ if len(pkglist) == 0 and action not in ('update', 'upgrade'):
+ raise FuncException("%s requires at least one pkg" % action)
+
+ results = 'command: %s %s\n' % (action, ' '.join(pkglist))
+ try:
+ ayum.doLock()
+ if pkglist:
+ for p in pkglist:
+ tx_mbrs = []
+ if action == 'install':
+ tx_mbrs = ayum.install(pattern=p)
+ elif action in ('remove', 'erase'):
+ tx_mbrs = ayum.remove(pattern=p)
+
+ elif action in ('update', 'upgrade'):
+ tx_mbrs = ayum.update(pattern=p)
+
+ if not tx_mbrs:
+ results += "No %s matched for %s\n" % (action, p)
+
+ else:
+ ayum.update()
+
+ ayum.buildTransaction()
+ ayum.processTransaction(
+ callback=DummyCallback())
+ finally:
+ results += _makeresults(ayum.tsInfo)
+ ayum.closeRpmDB()
+ ayum.doUnlock()
+ return results
+
class Yum(func_module.FuncModule):
version = "0.0.1"
api_version = "0.1.0"
description = "Package updates through yum."
- def update(self, pkg=None):
+ from yum import __version__ as yumversion
+ yvertuple = yumversion.split('.')
+ if int(yvertuple[0]) == 3 and int(yvertuple[2]) >= 25:
+ def rpmdbVersion(self, **kwargs):
+ import yum
+ ayum = yum.YumBase()
+ versionlist = ayum.rpmdb.simpleVersion(main_only=True)
+ version = versionlist[0]
+ return versionlist
+
+ def update(self, pkg=None, config_dict={}):
+ return _singleAction('update', items=pkg, config_dict=config_dict)
+
+ def install(self, pkg=None, config_dict={}):
+ return _singleAction('install', items=pkg, config_dict=config_dict)
+
+ def remove(self, pkg=None, config_dict={}):
+ return _singleAction('remove', items=pkg, config_dict=config_dict)
+
+ #def multiple(self, cmdlist=[]):
+ # """take multiple commands as a single transaction - equiv of yum shell"""
+ #    raise FuncException("Not Implemented Yet!")
+
+ def get_package_lists(self, pkgspec='installed,available,obsoletes,updates,extras', config_dict={}):
+ import yum
ayum = yum.YumBase()
ayum.doGenericSetup()
ayum.doRepoSetup()
- try:
- ayum.doLock()
- if pkg != None:
- tx_result = ayum.update(pattern=pkg)
- else:
- tx_result = ayum.update()
-
- ayum.buildTransaction()
- ayum.processTransaction(
- callback=DummyCallback())
- finally:
- ayum.closeRpmDB()
- ayum.doUnlock()
- return map(str, tx_result)
+ resultsdict = {}
+ pkgspec = pkgspec.replace(',',' ')
+ pkgtypes = pkgspec.split(' ')
+ for pkgtype in pkgtypes:
+ pkgtype = pkgtype.strip()
+ obj = ayum.doPackageLists(pkgnarrow=pkgtype)
+ if hasattr(obj, pkgtype):
+ thislist = getattr(obj, pkgtype)
+ output_list = sorted(map(str, thislist))
+ resultsdict[pkgtype] = output_list
+
+ return resultsdict
def check_update(self, filter=[], repo=None):
"""Returns a list of packages due to be updated
@@ -53,6 +139,7 @@
if type(filter) not in [list, tuple]:
filter = [filter]
+ import yum
ayum = yum.YumBase()
ayum.doConfigSetup()
ayum.doTsSetup()
@@ -70,6 +157,17 @@
return map(str, pkg_list)
+ def grep(self, word):
+ """
+ Grep info from module
+ """
+ results = {self.check_update:[]}
+ update_res = self.check_update()
+ results[self.check_update].extend([res for res in update_res if res.lower().find(word)!=-1])
+
+ return results
+ grep = func_module.findout(grep)
+
def register_method_args(self):
"""
Implementing the argument getter
@@ -86,6 +184,26 @@
},
'description':"Updating system according to a specified pattern"
},
+ 'install':{
+ 'args':{
+ 'pkg':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"The yum pattern for installing package"
+ }
+ },
+ 'description':"install package(s) according to a specified pattern"
+ },
+ 'remove':{
+ 'args':{
+ 'pkg':{
+ 'type':'string',
+ 'optional':False,
+ 'description':"The yum pattern for removing package"
+ }
+ },
+ 'description':"remove package(s) according to a specified pattern"
+ },
'check_update':{
'args':{
'filter':{
|
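Note on the yumcmd.py rewrite above: install/remove/update are now thin wrappers around _singleAction(), and get_package_lists() exposes yum's package "narrow" types. A hedged example of driving them from an overlord (minion and package names are placeholders):

    import func.overlord.client as fc

    overlord = fc.Overlord("minion1.example.org")

    # install/remove/update accept a space-separated pattern or a list of patterns;
    # update() with no argument updates everything
    print overlord.yumcmd.install("httpd mod_ssl")
    print overlord.yumcmd.remove("mod_ssl")
    print overlord.yumcmd.update()

    # pkgspec is a comma- or space-separated list of narrow types
    results = overlord.yumcmd.get_package_lists("installed,updates")
    for host, pkglists in results.items():
        print host, len(pkglists.get("updates", []))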
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/minion/server.py
^
|
@@ -35,6 +35,8 @@
import codes
import func.module_loader as module_loader
import func.minion.acls as acls_mod
+from func import utils as func_utils
+
from certmaster import utils
from certmaster import requester
@@ -85,6 +87,9 @@
self.handlers["system.list_methods"] = self.list_methods
self.handlers["system.list_modules"] = self.list_modules
self.handlers["system.inventory"] = self.inventory
+ self.handlers["system.grep"] = self.grep
+ # ultimately need to add a method here to force the server to reload itself so all NEW connections
+ # get a new RequestHandler
def list_modules(self):
modules = self.modules.keys()
@@ -97,6 +102,32 @@
return methods
+ import func.minion.modules.func_module as fm
+ def grep(self,word):
+ """
+ Find modules and method handlers whose names contain the given word
+ """
+
+ word = word.strip()
+ modules = self.modules.keys()
+ methods = self.handlers.keys()
+
+ return_dict = {}
+
+ #find modules
+ for m in modules:
+ if m.find(word)!=-1:
+ return_dict[self.list_modules]=m
+
+ #find methods
+ for m in methods:
+ if m.find(word)!=-1:
+ return_dict[self.list_methods]=m
+
+ return return_dict
+ grep = fm.findout(grep)
+
+
def inventory(self):
inventory = {}
@@ -106,23 +137,28 @@
# see which are where, but that seems lame -akl
for module in self.modules.keys():
inventory[module] = []
- for method in self.handlers.keys():
- # string match, ick.
- method_bits = method.split('.')
- method_module = string.join(method_bits[:-1], '.')
- method_name = method_bits[-1]
- if method_module == module:
- inventory[module].append(method_name)
+ for method in self.handlers.keys():
+ # string match, ick.
+ method_bits = method.split('.')
+ method_module = string.join(method_bits[:-1], '.')
+ method_name = method_bits[-1]
+ if method_module in inventory:
+ inventory[method_module].append(method_name)
return inventory
+
+
def get_dispatch_method(self, method):
if method in self.handlers:
return FuncApiMethod(self.logger, method, self.handlers[method])
-
else:
- self.logger.info("Unhandled method call for method: %s " % method)
+ module_name = string.join(method.split('.')[:-1], '.')
+ if module_name not in self.modules:
+ self.logger.exception("method %s called but %s module is not available" % (method, module_name))
+ raise codes.ModuleNotFoundException
+ self.logger.exception("Unhandled method call for method: %s " % method)
raise codes.InvalidMethodException
@@ -145,15 +181,16 @@
"""
(t, v, tb) = sys.exc_info()
- self.logger.info("Exception occured: %s" % t )
- self.logger.info("Exception value: %s" % v)
- self.logger.info("Exception Info:\n%s" % string.join(traceback.format_list(traceback.extract_tb(tb))))
+ self.logger.exception("Exception occurred: %s" % t)
+ self.logger.exception("Exception value: %s" % v)
+ self.logger.exception("Exception Info:\n%s" % string.join(traceback.format_list(traceback.extract_tb(tb))))
def __call__(self, *args):
self.logger.debug("(X) -------------------------------------------")
try:
+ self.__method = func_utils.get_fresh_method_instance(self.__method)
rc = self.__method(*args)
except codes.FuncException, e:
self.__log_exc()
@@ -168,7 +205,9 @@
return rc
-def serve():
+
+
+def setup_server():
"""
Code for starting the XMLRPC service.
@@ -178,7 +217,12 @@
listen_port = config.listen_port
if listen_port == '':
listen_port = 51234
- server =FuncSSLXMLRPCServer((listen_addr, listen_port))
+ server = FuncSSLXMLRPCServer((listen_addr, listen_port), config.module_list)
+ return server
+
+def serve():
+
+ server = setup_server()
server.logRequests = 0 # don't print stuff to console
server.serve_forever()
@@ -187,30 +231,49 @@
class FuncXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer, XmlRpcInterface):
def __init__(self, args):
-
+ # DOES ANYTHING EVER USE THIS? SKV - 2011/02/23
self.allow_reuse_address = True
self.modules = module_loader.load_modules()
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, args)
XmlRpcInterface.__init__(self)
-
+from func.minion.facts.minion_query import *
class FuncSSLXMLRPCServer(AuthedXMLRPCServer.AuthedSSLXMLRPCServer,
XmlRpcInterface):
- def __init__(self, args):
+ def __init__(self, args, module_list=[]):
self.allow_reuse_address = True
- self.modules = module_loader.load_modules()
+ self.modules = module_loader.load_modules(module_list = module_list)
+
+ #load facts methods
+ self.fact_methods = load_fact_methods()
+ self.minion_query = FactsMinion(method_fact_list=self.fact_methods)
XmlRpcInterface.__init__(self)
- hn = utils.get_hostname()
- self.key = "%s/%s.pem" % (self.cm_config.cert_dir, hn)
- self.cert = "%s/%s.cert" % (self.cm_config.cert_dir, hn)
- self.ca = "%s/ca.cert" % self.cm_config.cert_dir
+ hn = func_utils.get_hostname_by_route()
- self._our_ca = certs.retrieve_cert_from_file(self.ca)
+ if self.config.key_file != '':
+ self.key = self.config.key_file
+ else:
+ # search case-insensitively to find the right key - take the first one - if there are
+ # more than one differing only by case then the user is going to get 'unique' behavior :)
+ self.key = func_utils.find_files_by_hostname(hn, self.cm_config.cert_dir, '.pem')[0]
+ if self.config.cert_file != '':
+ self.cert = self.config.cert_file
+ else:
+ self.cert = func_utils.find_files_by_hostname(hn, self.cm_config.cert_dir, '.cert')[0]
+
+ if self.config.ca_file != '':
+ self.ca = self.config.ca_file
+ else:
+ self.ca = "%s/ca.cert" % self.cm_config.cert_dir
+
+
+ self._our_ca = certs.retrieve_cert_from_file(self.ca)
self.acls = acls_mod.Acls(config=self.config)
-
+
+
AuthedXMLRPCServer.AuthedSSLXMLRPCServer.__init__(self, args,
self.key, self.cert,
self.ca)
@@ -223,14 +286,14 @@
"""
# take _this_request and hand it off to check out the acls of the method
# being called vs the requesting host
-
+
if not hasattr(self, '_this_request'):
raise codes.InvalidMethodException
-
+
r,a = self._this_request
peer_cert = r.get_peer_certificate()
ip = a[0]
-
+
# generally calling conventions are: hardware.info
# async convention is async.hardware.info
@@ -243,7 +306,7 @@
if not self.acls.check(self._our_ca, peer_cert, ip, method, params):
raise codes.AccessToMethodDenied
-
+
# Recognize ipython's tab completion calls
if method == 'trait_names' or method == '_getAttributeNames':
return self.handlers.keys()
@@ -254,9 +317,19 @@
try:
if not async_dispatch:
- return self.get_dispatch_method(method)(*params)
+ #check if we send some queries
+ if len(params)>0 and type(params[0]) == dict and params[0].has_key('__fact__'):
+ fact_result = self.minion_query.exec_query(params[0]['__fact__'],True)
+ else:
+ return self.get_dispatch_method(method)(*params)
+
+ if fact_result[0]: #that means we have True from query so can go on
+ method_result = self.get_dispatch_method(method)(*params[1:])
+ return [{'__fact__':fact_result},method_result]
+ else:
+ return [{'__fact__':fact_result}]
else:
- return jobthing.minion_async_run(self.get_dispatch_method, method, params)
+ return jobthing.minion_async_run(self.get_dispatch_method, method, params,self.minion_query)
except:
(t, v, tb) = sys.exc_info()
rc = utils.nice_exception(t, v, tb)
@@ -265,7 +338,22 @@
def auth_cb(self, request, client_address):
peer_cert = request.get_peer_certificate()
return peer_cert.get_subject().CN
-
+
+
+def excepthook(exctype, value, tracebackobj):
+ exctype_blurb = "Exception occurred: %s" % exctype
+ excvalue_blurb = "Exception value: %s" % value
+ exctb_blurb = "Exception Info:\n%s" % string.join(traceback.format_list(traceback.extract_tb(tracebackobj)))
+
+ print exctype_blurb
+ print excvalue_blurb
+ print exctb_blurb
+
+ log = logger.Logger().logger
+ log.info(exctype_blurb)
+ log.info(excvalue_blurb)
+ log.info(exctb_blurb)
+
def main(argv):
@@ -273,21 +361,55 @@
Start things up.
"""
+ sys.excepthook = excepthook
if len(sys.argv) > 1 and sys.argv[1] == "--list-modules":
- module_names = module_loader.load_modules().keys()
+ config = read_config("/etc/func/minion.conf", FuncdConfig)
+ module_names = module_loader.load_modules(module_list = config.module_list).keys()
module_names.sort()
print "loaded modules:"
for foo in module_names:
print "\t" + foo
sys.exit(0)
+ if "--version" in sys.argv or "-v" in sys.argv:
+ print >> sys.stderr, file("/etc/func/version").read().strip()
+ sys.exit(0)
+
+ if "--info" in sys.argv:
+ server = setup_server()
+ print 'config:'
+ for l in str(server.config).split('\n'):
+ print '\t' + l
+
+ print 'server name: %s' % server.server_name
+ print 'server listen addr: %s:%s' % server.server_address
+ print 'key file: %s' % server.key
+ print 'cert file: %s' % server.cert
+ print 'ca file: %s' % server.ca
+ cert = certs.retrieve_cert_from_file(server.cert)
+ print 'cert dn: %s' % cert.get_subject().CN
+ print 'certificate hash: %s' % cert.subject_name_hash()
+ print 'modules loaded:'
+ for mn in sorted(server.modules.keys()):
+ print '\t' + mn
+ print 'acls:'
+ for (host, methods) in server.acls.acls.items():
+ print '\t' + host + ' : ' + str(methods)
+ print 'facts:'
+ for (n, meth) in server.fact_methods.items():
+ print '\t' + n + ' : ' + meth()
+ sys.exit(0)
+
if "daemon" in sys.argv or "--daemon" in sys.argv:
utils.daemonize("/var/run/funcd.pid")
else:
print "serving...\n"
try:
- requester.request_cert()
+ config = read_config("/etc/func/minion.conf", FuncdConfig)
+ if config.use_certmaster:
+ hn = func_utils.get_hostname_by_route()
+ requester.request_cert(hn)
serve()
except codes.FuncException, e:
print >> sys.stderr, 'error: %s' % e
|
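Note on the server.py changes above: besides the new system.grep handler and the funcd --info/--version flags, _dispatch() now recognises a fact query prepended to the normal XML-RPC parameters (a dict under the '__fact__' key) and only runs the requested method when the query evaluates true. A hedged illustration of that wire convention -- the query payload below is a made-up placeholder, not the real output of OverlordQuery.serialize_query():

    # Shape of the parameter list for a plain call vs. a fact-gated call.
    plain_params = ("httpd",)
    fact_gated_params = ({'__fact__': {'placeholder': 'serialized-query'}},) + plain_params

    # On the minion, _dispatch() peels off fact_gated_params[0], evaluates it with
    # FactsMinion.exec_query(), and only calls the real method with
    # fact_gated_params[1:] when the query matches; otherwise it returns just
    # [{'__fact__': fact_result}].
    print fact_gated_params[0].has_key('__fact__')   # True: takes the fact branch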
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/module_loader.py
^
|
@@ -16,12 +16,15 @@
import distutils.sysconfig
import os
import sys
+import traceback
import inspect
from gettext import gettext
+import fnmatch
+
_ = gettext
+
from func import logger
-logger = logger.Logger().logger
from inspect import isclass
from func.minion.modules import func_module
@@ -41,6 +44,7 @@
return module_files
def load_methods(path, main_class, parent_class=None):
+ log = logger.Logger().logger
methods = {}
modules = load_modules(path, main_class, parent_class=parent_class)
for x in modules.keys():
@@ -49,7 +53,9 @@
methods["%s.%s" % (x,method)]=getattr(modules[x], method)
return methods
-def load_modules(path='func/minion/modules/', main_class=func_module.FuncModule, blacklist=None, parent_class=None):
+def load_modules(path='func/minion/modules/', main_class=func_module.FuncModule,
+ blacklist=None, parent_class=None, module_list=[]):
+ log = logger.Logger().logger
python_path = distutils.sysconfig.get_python_lib()
module_file_path = "%s/%s" % (python_path, path)
(mod_path, mod_dir) = os.path.split(os.path.normpath(module_file_path))
@@ -81,6 +87,14 @@
mod_imp_name = pathname.replace("/", ".")
+ if module_list: # only do this if we have a module list at all, otherwise everything comes in
+ matched = False
+ for match in module_list:
+ if fnmatch.fnmatch(mod_imp_name, match):
+ matched = True
+ if not matched: # if we are not matched against anything in the module_list then skip it
+ continue
+
if mods.has_key(mod_imp_name):
# If we've already imported mod_imp_name, don't import it again
continue
@@ -95,7 +109,7 @@
for obj in dir(blip):
attr = getattr(blip, obj)
if isclass(attr) and issubclass(attr, main_class):
- logger.debug("Loading %s module" % attr)
+ log.debug("Loading %s module" % attr)
if parent_class:
mods[mod_imp_name] = attr(parent_class)
else:
@@ -103,13 +117,17 @@
except ImportError, e:
# A module that raises an ImportError is (for now) simply not loaded.
- errmsg = _("Could not load %s module: %s")
- logger.warning(errmsg % (mod_imp_name, e))
+ errmsg = _("Import error while loading %s module: %s")
+ log.warning(errmsg % (mod_imp_name, e))
+ etype, value, tb = sys.exc_info()
+ log.warning(traceback.format_exception(etype, value, tb))
bad_mods[mod_imp_name] = True
continue
except:
errmsg = _("Could not load %s module")
- logger.warning(errmsg % (mod_imp_name))
+ log.warning(errmsg % (mod_imp_name))
+ etype, value, tb = sys.exc_info()
+ log.warning(traceback.format_exception(etype, value, tb))
bad_mods[mod_imp_name] = True
continue
|
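Note on the module_loader.py changes above: load_modules() now takes a module_list (fed from minion.conf's module_list in server.py) and skips any module whose import name does not match one of its entries; matching uses fnmatch, so shell-style globs work. A hedged sketch of just that filter -- the names below are placeholders, and the exact dotted form of the import name depends on where the module lives under func/minion/modules/:

    import fnmatch

    module_list = ["service", "yum*"]           # e.g. from minion.conf
    candidates = ["service", "yumcmd", "vlan"]  # import names built by load_modules()

    loaded = []
    for name in candidates:
        for pattern in module_list:
            if fnmatch.fnmatch(name, pattern):
                loaded.append(name)
                break

    print loaded   # ['service', 'yumcmd']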
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/base_command.py
^
|
@@ -1,4 +1,3 @@
-#!/usr/bin/python
"""
Copyright 2008, Red Hat, Inc
Adrian Likins <alikins@redhat.com>
@@ -11,6 +10,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
+import sys
import command
import client
@@ -21,8 +21,6 @@
DEFAULT_PORT = 51234
DEFAULT_MAPLOC = "/var/lib/func/map"
-# FIXME
-CONFIG_FILE="/etc/func/minion.conf"
class BaseCommand(command.Command):
""" wrapper class for commands with some convience functions, namely
@@ -36,20 +34,22 @@
delegate=False
mapfile=DEFAULT_MAPLOC
- # temporary work around FIXME
+ # temporary work around FIXME
# we really need a way to store what port each minion is
# listening on, though this is probably workable for most
# cases. Though it should probably be a different config
# file, since FuncdConfig is for the minion server, not
- config = read_config(CONFIG_FILE, commonconfig.FuncdConfig)
- port = config.listen_port
def getOverlord(self):
+ ol_config = None
+ if self.parentCommand.conffile:
+ ol_config = read_config(self.parentCommand.conffile, commonconfig.OverlordConfig)
self.overlord_obj = client.Overlord(self.server_spec,
- port=self.port,
interactive=self.interactive,
verbose=self.verbose,
- config=self.config,
async=self.async,
nforks=self.forks,
delegate=self.delegate,
- mapfile=self.mapfile)
+ mapfile=self.mapfile,
+ timeout=self.parentCommand.socket_timeout,
+ exclude_spec=self.parentCommand.exclude_spec,
+ config=ol_config)
|
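Note on the base_command.py change above: the overlord port and config no longer come from the minion's config file; getOverlord() reads the overlord-side config when the parent command was given a config file, and forwards the CLI's socket timeout and exclude spec. A hedged sketch of the same lookup done standalone (attribute names follow this diff and the client.py diff below):

    from certmaster.config import read_config
    from func.commonconfig import OverlordConfig, OVERLORD_CONFIG_FILE

    ol_config = read_config(OVERLORD_CONFIG_FILE, OverlordConfig)
    # 0.0 means "not set": Overlord falls back to its DEFAULT_TIMEOUT, and an
    # explicit --timeout on the command line wins over both.
    print ol_config.socket_timeout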
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/client.py
^
|
@@ -12,16 +12,22 @@
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
+from func.jobthing import RETAIN_INTERVAL
import sys
-import glob
import os
import time
+import shlex
+import subprocess
+from exceptions import Exception
+import re
+import fnmatch
import func.yaml as yaml
from certmaster.commonconfig import CMConfig
from certmaster import utils
from certmaster.config import read_config, CONFIG_FILE
+from func.commonconfig import FuncdConfig, FUNCD_CONFIG_FILE, OverlordConfig, OVERLORD_CONFIG_FILE
import sslclient
@@ -33,12 +39,18 @@
from func.CommonErrors import *
import func.module_loader as module_loader
from func.overlord import overlord_module
+from func import utils as func_utils
+from func.minion.facts.overlord_query import OverlordQuery,display_active_facts
# ===================================
# defaults
# TO DO: some of this may want to come from config later
+# this can also be set in /etc/func/minion.conf:listen_port
DEFAULT_PORT = 51234
+#override in /etc/func/overlord.conf.
+DEFAULT_TIMEOUT = None
+
FUNC_USAGE = "Usage: %s [ --help ] [ --verbose ] target.example.org module method arg1 [...]"
DEFAULT_MAPLOC = "/var/lib/func/map"
DELEGATION_METH = "delegation.run"
@@ -68,6 +80,11 @@
raise AttributeError("no method called: %s" % ".".join(self.base))
module = self.base[0]
method = ".".join(self.base[1:])
+ #here we will inject some variables that will do the facts stuff
+ if self.clientref.overlord_query.fact_query and module != "local":
+ #here get the serialized object and add
+ #at the top of the args ...
+ args =[{'__fact__':self.clientref.overlord_query.serialize_query()}]+list(args)
return self.clientref.run(module,method,args,nforks=self.nforks)
# ===================================
@@ -75,10 +92,10 @@
# other modules with a Overlord class
class Minions(object):
- def __init__(self, spec, port=51234,
+ def __init__(self, spec, port=51234,
noglobs=None, verbose=None,
- just_fqdns=False, groups_file=None,
- delegate=False, minionmap={}):
+ just_fqdns=False, groups_backend="conf",
+ delegate=False, minionmap={},exclude_spec=None,**kwargs):
self.spec = spec
self.port = port
@@ -87,57 +104,171 @@
self.just_fqdns = just_fqdns
self.delegate = delegate
self.minionmap = minionmap
+ self.exclude_spec = exclude_spec
- self.config = read_config(CONFIG_FILE, CMConfig)
- self.group_class = groups.Groups(filename=groups_file)
-
- self.all_hosts = []
- self.all_certs = []
+ self.cm_config = read_config(CONFIG_FILE, CMConfig)
+ self.overlord_config = read_config(OVERLORD_CONFIG_FILE, OverlordConfig)
+ self.group_class = groups.Groups(backend=groups_backend,
+ get_hosts_for_spec=self.get_hosts_for_spec,
+ **kwargs)
+
+ #let's make them sets so we don't loop again and again
+ self.all_hosts = set()
+ self.all_certs = set()
self.all_urls = []
+ self._downed_hosts = []
def _get_new_hosts(self):
- self.new_hosts = self.group_class.get_hosts_by_group_glob(self.spec)
+ self.new_hosts = self._get_group_hosts(self.spec)
return self.new_hosts
- def _get_all_hosts(self):
- seperate_gloobs = self.spec.split(";")
- seperate_gloobs = seperate_gloobs + self.new_hosts
+ def _get_group_hosts(self,spec):
+ return self.group_class.get_hosts_glob(spec)
+
+ def _get_hosts_for_specs(self,seperate_gloobs):
+ """
+ Gets the hosts and certs for proper spec
+ """
+ tmp_hosts = set()
+ tmp_certs = set()
for each_gloob in seperate_gloobs:
- #if there is some string from group glob just skip it
if each_gloob.startswith('@'):
continue
- actual_gloob = "%s/%s.%s" % (self.config.certroot, each_gloob, self.config.cert_extension)
- certs = glob.glob(actual_gloob)
- for cert in certs:
- #if the spec includes some groups and also it includes some *
- #may cause some duplicates so should check that
- #For example spec = "@home_group;*" will give lots of duplicates as a result
- if not cert in self.all_certs:
- self.all_certs.append(cert)
- # use basename to trim off any excess /'s, fix
- # ticket #53 "Trailing slash in certmaster.conf confuses glob function
- certname = os.path.basename(cert.replace(self.config.certroot, ""))
- host = certname[:-(len(self.config.cert_extension) + 1)]
- self.all_hosts.append(host)
+ h,c = self._get_hosts_for_spec(each_gloob)
+ tmp_hosts = tmp_hosts.union(h)
+ tmp_certs = tmp_certs.union(c)
+
+ return tmp_hosts,tmp_certs
+
+ def _get_hosts_for_spec(self,each_gloob):
+ """
+ Pull only for specified spec
+ """
+ #these will be returned
+ tmp_certs = set()
+ tmp_hosts = set()
+
+ # if call is delegated find the shortest path to the minion and use the sub-overlord's certificate
+ if self.delegate:
+ found_path = ''
+ for gloob in func_utils.get_all_host_aliases(each_gloob):
+ shortest_path = dtools.get_shortest_path(each_gloob,self.minionmap)
+ if shortest_path:
+ found_path = shortest_path[0]
+ if not found_path:
+ return tmp_hosts,tmp_certs
+ each_gloob = shortest_path[0]
+
+ if not os.access(self.cm_config.certroot, os.R_OK):
+ if self.overlord_config.allow_unknown_minions:
+ tmp_hosts.add(each_gloob)
+ else:
+ sys.stderr.write("Cannot read certs dir: %s and cannot use unknown minion\n" % (self.cm_config.certroot))
+
+ return tmp_hosts,tmp_certs
+
+ #actual_gloob = "%s/%s.%s" % (self.cm_config.certroot, each_gloob, self.cm_config.cert_extension)
+ certs = func_utils.find_files_by_hostname(each_gloob, self.cm_config.certroot, self.cm_config.cert_extension)
+
+ # pull in peers if enabled for minion-to-minion
+ if self.cm_config.peering:
+ #peer_gloob = "%s/%s.%s" % (self.cm_config.peerroot, each_gloob, self.cm_config.cert_extension)
+ certs += func_utils.find_files_by_hostname(each_gloob, self.cm_config.peerroot, self.cm_config.cert_extension)
+
+
+ # if we can't match this gloob and the gloob is not REALLY a glob
+ # let the gloob be the hostname we try to connect to.
+ if not certs and not func_utils.re_glob(each_gloob):
+ found_by_alias = False
+ aliases = func_utils.get_all_host_aliases(each_gloob)
+
+ for name in aliases:
+ #actual_gloob = "%s/%s.%s" % (self.cm_config.certroot, name, self.cm_config.cert_extension)
+ certs += func_utils.find_files_by_hostname(name, self.cm_config.certroot, self.cm_config.cert_extension)
+ if self.cm_config.peering:
+ #peer_gloob = "%s/%s.%s" % (self.cm_config.peerroot, name, self.cm_config.cert_extension)
+ certs += func_utils.find_files_by_hostname(name, self.cm_config.peerroot, self.cm_config.cert_extension)
+ break
+
+ if self.overlord_config.allow_unknown_minions and not certs:
+ tmp_hosts.add(each_gloob)
+
+ for cert in certs:
+ tmp_certs.add(cert)
+ # use basename to trim off any excess /'s, fix
+ # ticket #53 "Trailing slash in certmaster.conf confuses glob function
+ certname = os.path.basename(cert.replace(self.cm_config.certroot, ""))
+ if self.cm_config.peering:
+ certname = os.path.basename(certname.replace(self.cm_config.peerroot, ""))
+ host = certname[:-(len(self.cm_config.cert_extension) + 1)]
+ tmp_hosts.add(host)
+
+ return tmp_hosts,tmp_certs
+
+ def get_hosts_for_spec(self,spec):
+ """
+ Be careful when editing this method: it is also used by the
+ groups API to pull machines, to allow better glob control there.
+ """
+ return self._get_hosts_for_spec(spec)[0]
+
+
+
+ def _get_all_hosts(self):
+ """
+ Gets the hosts included by the user's spec minus the excluded ones,
+ an ORM-like spec so the user may say e.g.
+ func "*" --exclude "www.*;@mygroup" ...
+ """
+ included_part = self._get_hosts_for_specs(self.spec.split(";")+self.new_hosts)
+ self.all_certs=self.all_certs.union(included_part[1])
+ self.all_hosts=self.all_hosts.union(included_part[0])
+ #excluded ones
+ if self.exclude_spec:
+ #first get the groups you don't want to run on:
+ group_exclude = self._get_group_hosts(self.exclude_spec)
+ excluded_part = self._get_hosts_for_specs(self.exclude_spec.split(";")+group_exclude)
+ self.all_certs = self.all_certs.difference(excluded_part[1])
+ self.all_hosts = self.all_hosts.difference(excluded_part[0])
+
+
def get_all_hosts(self):
+ """
+ Get current host list
+ """
self._get_new_hosts()
self._get_all_hosts()
- return self.all_hosts
- def get_urls(self):
- self._get_new_hosts()
- self._get_all_hosts()
- for host in self.all_hosts:
+ #we keep it as a set internally, so hand back a list here
+ return list(self.all_hosts)
+
+ def get_urls(self, hosts=[]):
+ if not hosts:
+ self._get_new_hosts()
+ self._get_all_hosts()
+ hosts = self.all_hosts
+
+ results = []
+
+ for host in hosts:
+ if host in self.downed_hosts:
+ if self.verbose:
+ sys.stderr.write("%s excluded due to being listed in %s\n" % (host, self.overlord_config.host_down_list))
+ continue
if not self.just_fqdns:
- self.all_urls.append("https://%s:%s" % (host, self.port))
+ host_res = "https://%s:%s" % (host, self.port)
else:
- self.all_urls.append(host)
-
- if self.verbose and len(self.all_urls) == 0:
+ host_res = host
+
+ if not host_res in results: # this might get slow if there are thousands of hosts
+ results.append(host_res)
+
+ if self.verbose and len(results) == 0:
sys.stderr.write("no hosts matched\n")
- return self.all_urls
+ return results
# FIXME: hmm, dont like this bit of the api... -al;
def is_minion(self):
@@ -146,6 +277,151 @@
return True
return False
+ def _get_downed_hosts(self):
+ """returns a list of minions which are known to not be up"""
+ if self._downed_hosts:
+ return self._downed_hosts
+
+ hosts = []
+ if self.overlord_config.host_down_list and \
+ os.path.exists(self.overlord_config.host_down_list):
+ fo = open(self.overlord_config.host_down_list, 'r')
+ for line in fo.readlines():
+ if re.match('\s*(#|$)', line):
+ continue
+ hn = line.replace('\n','')
+ if hn not in hosts:
+ hosts.append(hn)
+ fo.close()
+
+ self._downed_hosts = hosts
+
+ return self._downed_hosts
+
+ downed_hosts = property(fget=lambda self: self._get_downed_hosts())
+
+class PuppetMinions(Minions):
+ def __init__(self, spec, port=51234,
+ noglobs=None, verbose=None,
+ just_fqdns=False, groups_backend="conf",
+ delegate=False, minionmap={},exclude_spec=None,**kwargs):
+ # local host_inv cache
+ self._host_inv = {}
+ self._revoked_serials = []
+
+ Minions.__init__(self, spec, port=port, noglobs=noglobs, verbose=verbose,
+ just_fqdns=just_fqdns, groups_backend=groups_backend,
+ delegate=delegate, minionmap=minionmap,
+ exclude_spec=exclude_spec,**kwargs)
+
+ def _get_hosts_for_spec(self,each_gloob):
+ """
+ Pull only for specified spec
+ """
+ #these will be returned
+ tmp_certs = set()
+ tmp_hosts = set()
+ if not self._host_inv:
+ # get all hosts
+ if os.access(self.overlord_config.puppet_inventory, os.R_OK):
+ fo = open(self.overlord_config.puppet_inventory, 'r')
+ host_inv = {}
+ time_format = '%Y-%m-%dT%H:%M:%S%Z'
+ now = time.time()
+ for line in fo.readlines():
+ if re.match('\s*(#|$)', line):
+ continue
+ try:
+ (serial, before, after, cn) = line.split()
+ except ValueError:
+ continue
+ before = time.strftime('%s', time.strptime(before, time_format))
+ if now < int(before):
+ continue
+ after = time.strftime('%s', time.strptime(after, time_format))
+ if now > int(after):
+ continue
+
+ hn = cn.replace('/CN=','')
+ hn = hn.replace('\n','')
+ if hn in host_inv:
+ if host_inv[hn] > serial:
+ continue
+ host_inv[hn] = serial
+ fo.close()
+ self._host_inv = host_inv # store ours
+
+ # if call is delegated find the shortest path to the minion and use the sub-overlord's certificate
+ if self.delegate:
+ try:
+ each_gloob = func_utils.get_all_host_aliases(each_gloob)[0]
+ shortest_path = dtools.get_shortest_path(each_gloob, self.minionmap)
+ except IndexError:
+ return tmp_hosts,tmp_certs
+ else:
+ each_gloob = shortest_path[0]
+
+ # revoked certs
+ self._return_revoked_serials(self.overlord_config.puppet_crl)
+ for hostname in self._host_inv.keys():
+ if int(self._host_inv[hostname], 16) in self._revoked_serials:
+ continue
+ pempath = '%s/%s.pem' % (self.overlord_config.puppet_signed_certs_dir, hostname)
+ if not os.path.exists(pempath):
+ continue
+ matched_gloob = False
+ if fnmatch.fnmatch(hostname, each_gloob):
+ matched_gloob = True
+ tmp_hosts.add(hostname)
+
+ # if we can't match this gloob and the gloob is not REALLY a glob
+ # then toss this at gethostbyname_ex() and see if any of the cname
+ # or aliases matches _something_ we know about
+ if not matched_gloob and not func_utils.re_glob(each_gloob):
+ found_by_alias = False
+ aliases = func_utils.get_all_host_aliases(each_gloob)
+ for name in aliases:
+ if name in self._host_inv and int(self._host_inv[name], 16) not in self._revoked_serials:
+ if os.path.exists(self.overlord_config.puppet_signed_certs_dir + '/' + name + '.pem'):
+ tmp_hosts.add(name)
+ found_by_alias = True
+ break
+
+ if self.overlord_config.allow_unknown_minions and not found_by_alias:
+ tmp_hosts.add(each_gloob)
+
+ # don't return certs path - just hosts
+
+ return tmp_hosts,tmp_certs
+
+ def _return_revoked_serials(self, crlfile):
+ if not self._revoked_serials:
+
+ serials = []
+ try:
+ crltext = open(crlfile, 'r').read()
+ from OpenSSL import crypto
+ crl = crypto.load_crl(crypto.FILETYPE_PEM, crltext)
+ revs = crl.get_revoked()
+ for revoked in revs:
+ serials.append(str(revoked.get_serial()))
+
+ except (ImportError, AttributeError), e:
+ call = '/usr/bin/openssl crl -text -noout -in %s' % crlfile
+ call = shlex.split(call)
+ serials = []
+ (res,err) = subprocess.Popen(call, stdout=subprocess.PIPE).communicate()
+ for line in res.split('\n'):
+ if line.find('Serial Number:') == -1:
+ continue
+ (crap, serial) = line.split(':')
+ serial = serial.strip()
+ serial = int(serial, 16)
+ serials.append(serial)
+
+ self._revoked_serials = serials
+
+
# does the hostnamegoo actually expand to anything?
@@ -168,7 +444,7 @@
def __init__(self, server_spec, port=DEFAULT_PORT, interactive=False,
verbose=False, noglobs=False, nforks=1, config=None, async=False, init_ssl=True,
- delegate=False, mapfile=DEFAULT_MAPLOC):
+ delegate=None, mapfile=DEFAULT_MAPLOC, timeout=None,exclude_spec=None):
"""
Constructor.
@server_spec -- something like "*.example.org" or "foosball"
@@ -177,16 +453,35 @@
@noglobs -- specifies server_spec is not a glob, and run should return single values
@config -- optional config object
"""
- self.config = config
- if config is None:
- self.config = read_config(CONFIG_FILE, CMConfig)
-
+
+ self.cm_config = read_config(CONFIG_FILE, CMConfig)
+ self.funcd_config = read_config(FUNCD_CONFIG_FILE, FuncdConfig)
+ self.config = read_config(OVERLORD_CONFIG_FILE, OverlordConfig)
+ if config:
+ self.config = config
+
+ self.overlord_config = self.config # for backward compat
+
self.server_spec = server_spec
+ self.exclude_spec = exclude_spec
self.port = port
+ if self.config.listen_port:
+ self.port = self.config.listen_port
+
self.verbose = verbose
self.interactive = interactive
self.noglobs = noglobs
+
+ # the default
+ self.timeout = DEFAULT_TIMEOUT
+ # the config file
+ if self.config.socket_timeout != 0.0:
+ self.timeout = self.config.socket_timeout
+ # commandline
+ if timeout:
+ self.timeout = timeout
+
self.nforks = nforks
self.async = smart_bool(async)
#FIXME: async should never ne none, yet it is -akl
@@ -194,47 +489,73 @@
self.async = False
self.delegate = delegate
+ if delegate is None:
+ self.delegate = self.config.delegate
self.mapfile = mapfile
-
+ self.minionmap = {}
+
+ self.allow_unknown_minions = self.config.allow_unknown_minions
+
+ #overlord_query stuff
+ self.overlord_query = OverlordQuery()
+ if self.config.puppet_minions:
+ self._mc = PuppetMinions
+ else:
+ self._mc = Minions
- self.minions_class = Minions(self.server_spec, port=self.port, noglobs=self.noglobs, verbose=self.verbose)
- self.minions = self.minions_class.get_urls()
- if len(self.minions) == 0:
- raise Func_Client_Exception, 'Can\'t find any minions matching \"%s\". ' % self.server_spec
-
if self.delegate:
try:
mapstream = file(self.mapfile, 'r').read()
self.minionmap = yaml.load(mapstream).next()
- except e:
- sys.stderr.write("mapfile load failed, switching delegation off")
+ except Exception, e:
+ sys.stderr.write("mapfile load failed, switching delegation off\n")
self.delegate = False
-
+
+ self.minions_class = self._mc(self.server_spec, port=self.port,
+ noglobs=self.noglobs, verbose=self.verbose,
+ delegate=self.delegate,minionmap=self.minionmap,
+ exclude_spec=self.exclude_spec)
+ # once we setup the minionsclass insert our current config object
+ self.minions_class.overlord_config = self.config
+ self.minions = self.minions_class.get_urls()
+
+ if len(self.minions) == 0:
+ raise Func_Client_Exception, 'Can\'t find any minions matching \"%s\". ' % self.server_spec
+
if init_ssl:
self.setup_ssl()
self.methods = module_loader.load_methods('func/overlord/modules/', overlord_module.BaseModule, self)
-
+
def setup_ssl(self, client_key=None, client_cert=None, ca=None):
+ self.ca = self.key = self.cert = ''
# defaults go:
# certmaster key, cert, ca
# funcd key, cert, ca
# raise FuncClientError
- ol_key = '%s/certmaster.key' % self.config.cadir
- ol_crt = '%s/certmaster.crt' % self.config.cadir
- myname = utils.get_hostname(talk_to_certmaster=False)
+
+ if not client_key and self.config.key_file != '':
+ client_key = self.config.key_file
+ if not client_cert and self.config.cert_file != '':
+ client_cert = self.config.cert_file
+ if not ca and self.config.ca_file != '':
+ ca = self.config.ca_file
+
+ ol_key = '%s/certmaster.key' % self.cm_config.cadir
+ ol_crt = '%s/certmaster.crt' % self.cm_config.cadir
+ myname = func_utils.get_hostname_by_route()
# FIXME: should be config -akl?
# maybe /etc/pki/func is a variable somewhere?
fd_key = '/etc/pki/certmaster/%s.pem' % myname
fd_crt = '/etc/pki/certmaster/%s.cert' % myname
- self.ca = '%s/certmaster.crt' % self.config.cadir
- if client_key and client_cert and ca:
- if (os.access(client_key, os.R_OK) and os.access(client_cert, os.R_OK)
- and os.access(ca, os.R_OK)):
- self.key = client_key
- self.cert = client_cert
- self.ca = ca
+ self.ca = '%s/certmaster.crt' % self.cm_config.cadir
+ if not os.access(self.ca, os.R_OK):
+ self.ca = '%s/ca.cert' % self.cm_config.cert_dir
+ if client_key and client_cert and ca:
+ self.key = client_key
+ self.cert = client_cert
+ self.ca = ca
# otherwise fall through our defaults
elif os.access(ol_key, os.R_OK) and os.access(ol_crt, os.R_OK):
self.key = ol_key
@@ -245,10 +566,18 @@
else:
raise Func_Client_Exception, 'Cannot read ssl credentials: ssl, cert, ca. '+\
'Ensure you have permission to read files in /etc/pki/certmaster/ directory.'
+
+ if not os.access(self.ca, os.R_OK):
+ raise Func_Client_Exception, 'Cannot read ssl ca: %s' % self.ca
+ if not os.access(self.key, os.R_OK):
+ raise Func_Client_Exception, 'Cannot read ssl key: %s' % self.key
+ if not os.access(self.cert, os.R_OK):
+ raise Func_Client_Exception, 'Cannot read ssl cert: %s' % self.cert
+
-
-
+
+
def __getattr__(self, name):
"""
This getattr allows manipulation of the object as if it were
@@ -262,7 +591,6 @@
# WARNING: any missing values in Overlord's source will yield
# strange errors with this engaged. Be aware of that.
"""
-
return CommandAutomagic(self, [name], self.nforks)
# -----------------------------------------------
@@ -271,7 +599,13 @@
"""
Use this to acquire status from jobs when using run with async client handles
"""
- return jobthing.job_status(jobid, client_class=Overlord)
+ status,async_result = jobthing.job_status(jobid, client_class=Overlord,
+ client_class_config=self.config)
+ if not self.overlord_query.fact_query:
+ #that will use the default overlord job_status
+ return (status,display_active_facts(async_result))
+ else:
+ return (status,self.overlord_query.display_active(async_result))
# -----------------------------------------------
@@ -281,7 +615,107 @@
to get current ids with their short results in the database
"""
return jobthing.get_open_ids()
-
+
+
+ def tail_log(self,job_id,host=None,remove_old=None):
+ """
+ Reads from the minion the log file that matches the given job_id
+ and returns its output.
+ """
+ from func.index_db import get_index_data,delete_index_data
+ from func.jobthing import JOB_ID_FINISHED,JOB_ID_LOST_IN_SPACE,JOB_ID_REMOTE_ERROR,JOB_ID_RUNNING
+ RETAIN_INTERVAL = 60 * 60
+ import time
+
+ code,result = Overlord(self.server_spec).job_status(job_id)
+ if code == JOB_ID_RUNNING:
+ return (None,False)
+ index_data = get_index_data()
+
+ #if we should remove old ones
+ if remove_old:
+ rm_list = []
+ now = time.time()
+ for job_id,minion_tuple in index_data.iteritems():
+ job_key = job_id.split("-")
+ job_key = job_key[len(job_key)-1]
+ if (now - float(job_key)) > RETAIN_INTERVAL:
+ rm_list.append(job_id)
+ #deleting the old ones
+ print "I will delete those : ",rm_list
+ delete_index_data(rm_list)
+
+ host_output = {}
+ if index_data.has_key(job_id):
+ host_tuple = index_data[job_id]
+ #h_t is a tuple of (minion_id,host)
+ if not host:
+ for h_t in host_tuple:
+ tmp_res = Overlord(h_t[1]).jobs.tail_output(h_t[0])
+ host_output.update(tmp_res)
+ else:#we want only a host
+ for h_t in host_tuple:
+ if h_t[1] == host:
+ tmp_res = Overlord(h_t[1]).jobs.tail_output(h_t[0])
+ host_output.update(tmp_res)
+ break
+ if not host_output:
+ return (None,True)
+ else:
+ return (None,True)
+
+ if code in [JOB_ID_FINISHED,JOB_ID_LOST_IN_SPACE,JOB_ID_REMOTE_ERROR]:
+ #means the job is finished; no need to wait for more
+ return (host_output,True)
+ #means that job is NOT finished there is more data to come
+ else:
+ return (host_output,False)
+
+ def check_progress(self,job_id,host):
+ """
+ Gets the job's progress from the minion side, formatted as
+ (current, all).
+ """
+
+ from func.index_db import get_index_data
+ from func.jobthing import JOB_ID_FINISHED,JOB_ID_LOST_IN_SPACE,JOB_ID_REMOTE_ERROR,JOB_ID_RUNNING
+
+ code,result = Overlord(self.server_spec).job_status(job_id)
+ if code == JOB_ID_RUNNING:
+ return (None,False)
+ index_data = get_index_data()
+
+ host_output = {}
+ if index_data.has_key(job_id):
+ host_tuple = index_data[job_id]
+ #h_t is a tuple of (minion_id,host)
+ for h_t in host_tuple:
+ if h_t[1] == host:
+ tmp_res = Overlord(h_t[1]).jobs.get_progress(h_t[0])
+ host_output.update(tmp_res)
+ break
+ if not host_output:
+ return (None,True)
+ else:
+ return (None,True)
+
+ if code in [JOB_ID_FINISHED,JOB_ID_LOST_IN_SPACE,JOB_ID_REMOTE_ERROR]:
+ #means the job is finished; no need to wait for more
+ if host_output[host] == [0,0]:
+ return (None,True)
+ else:
+ return (host_output,True)
+ #means that job is NOT finished there is more data to come
+ else:
+ if host_output[host] == [0,0]:
+ return (None,False)
+ else:
+ return (host_output,False)
+
+
+
def list_minions(self, format='list'):
"""
Returns a flat list containing the minions this Overlord object currently
@@ -294,7 +728,7 @@
if minion not in minionlist: #ugh, brute force :(
minionlist.append(minion)
return minionlist
-
+
# -----------------------------------------------
def run(self, module, method, args, nforks=1):
@@ -315,18 +749,22 @@
raise AttributeError("No such local method: %s" % method)
if not self.delegate: #delegation is turned off, so run normally
- return self.run_direct(module, method, args, nforks)
-
+ minion_result = self.run_direct(module, method, args, nforks)
+ if self.overlord_query.fact_query:
+ return self.overlord_query.display_active(minion_result)
+ else:
+ return minion_result
+
delegatedhash = {}
directhash = {}
completedhash = {}
-
+
#First we get all call paths for minions not directly beneath this overlord
dele_paths = dtools.get_paths_for_glob(self.server_spec, self.minionmap)
-
+
#Then we group them together in a dictionary by a common next hop
(single_paths,grouped_paths) = dtools.group_paths(dele_paths)
-
+
for group in grouped_paths.keys():
delegatedhash.update(self.run_direct(module,
method,
@@ -334,12 +772,13 @@
nforks,
call_path=grouped_paths[group],
suboverlord=group))
-
+
#Next, we run everything that can be run directly beneath this overlord
#Why do we do this after delegation calls? Imagine what happens when
#reboot is called...
- directhash.update(self.run_direct(module,method,args,nforks))
-
+ if single_paths != []:
+ directhash.update(self.run_direct(module,method,args,nforks))
+
#poll async results if we've async turned on
if self.async:
while (len(delegatedhash) + len(directhash)) > 0:
@@ -353,7 +792,7 @@
else:
completedhash.update(async_results[minion])
del delegatedhash[minion]
-
+
for minion in directhash.keys():
results = directhash[minion]
(return_code, async_results) = self.job_status(results)
@@ -365,15 +804,24 @@
completedhash.update(async_results)
del directhash[minion]
time.sleep(0.1) #pause a bit so we don't flood our minions
- return completedhash
-
+ if self.overlord_query.fact_query:
+ return self.overlord_query.display_active(completedhash)
+ else:
+ return completedhash
+
+
+
#we didn't instantiate this Overlord in async mode, so we just return the
#result hash
completedhash.update(delegatedhash)
completedhash.update(directhash)
- return completedhash
-
-
+
+ if self.overlord_query.fact_query:
+ return self.overlord_query.display_active(completedhash)
+ else:
+ return completedhash
+
+
# -----------------------------------------------
def run_direct(self, module, method, args, nforks=1, *extraargs, **kwargs):
@@ -390,13 +838,13 @@
results = {}
spec = ''
minionurls = []
+ minion_hosts = []
use_delegate = False
delegation_path = []
-
+
def process_server(bucketnumber, buckets, server):
-
- conn = sslclient.FuncServer(server, self.key, self.cert, self.ca )
+ conn = sslclient.FuncServer(server, self.key, self.cert, self.ca, self.timeout)
# conn = xmlrpclib.ServerProxy(server)
if self.interactive:
@@ -424,7 +872,7 @@
# this is the point at which we make the remote call.
if use_delegate:
retval = getattr(conn, meth)(module,
- method,
+ method,
args,
delegation_path,
self.async,
@@ -434,7 +882,7 @@
if self.interactive:
print retval
-
+
except Exception, e:
(t, v, tb) = sys.exc_info()
retval = utils.nice_exception(t,v,tb)
@@ -449,25 +897,25 @@
right = server.rfind(":")
server_name = server[left:right]
return (server_name, retval)
-
+
if kwargs.has_key('call_path'): #we're delegating if this key exists
delegation_path = kwargs['call_path']
spec = kwargs['suboverlord'] #the sub-overlord directly beneath this one
- minionobj = Minions(spec, port=self.port, verbose=self.verbose)
+ minions_hosts = self.minions_class.get_hosts_for_spec(spec)
use_delegate = True #signal to process_server to call delegate method
- minionurls = minionobj.get_urls() #the single-item url list to make async
+ minionurls = self.minions_class.get_urls(hosts=minion_hosts) #the single-item url list to make async
#tools such as jobthing/forkbomb happy
else: #we're directly calling minions, so treat everything normally
spec = self.server_spec
- minionurls = self.minions
+ minionurls = self.minions_class.get_urls()
+
#print "Minion_url is :",minionurls
#print "Process server is :",process_server
-
+
if not self.noglobs:
if self.nforks > 1 or self.async:
# using forkbomb module to distribute job over multiple threads
if not self.async:
-
results = forkbomb.batch_run(minionurls, process_server, nforks)
else:
minion_info =dict(spec=spec,module=module,method=method)
@@ -477,24 +925,25 @@
results = {}
for x in minionurls:
(nkey,nvalue) = process_server(0, 0, x)
- results[nkey] = nvalue
+ results[nkey] = nvalue
else:
+
# globbing is not being used, but still need to make sure
# URI is well formed.
# expanded = expand_servers(self.server_spec, port=self.port, noglobs=True, verbose=self.verbose)[0]
- expanded_minions = Minions(spec, port=self.port, noglobs=True, verbose=self.verbose)
+ expanded_minions = self._mc(spec, port=self.port, noglobs=True, verbose=self.verbose)
minions = expanded_minions.get_urls()[0]
results = process_server(0, 0, minions)
-
+
if self.delegate and self.async:
return {spec:results}
-
+
if use_delegate:
if utils.is_error(results[spec]):
print results
return results
return results[spec]
-
+
return results
# -----------------------------------------------
@@ -526,8 +975,124 @@
max = x
return max
+ def filter(self,*args,**kwargs):
+ """
+ Filter on facts without calling the minion directly; returns a
+ fresh Overlord clone whose fact query ANDs the given criteria.
+ """
+
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.filter(*args,**kwargs)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+ def filter_or(self,*args,**kwargs):
+ """
+ Filter on facts without calling the minion directly; returns a
+ fresh Overlord clone whose fact query ORs the given criteria.
+ """
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.filter_or(*args,**kwargs)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+ def and_and(self,*args,**kwargs):
+ """
+ Filter on facts without calling the minion directly; returns a
+ fresh Overlord clone with the criteria combined via the fact query's and_and().
+ """
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.and_and(*args,**kwargs)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+
+
+ def and_or(self,*args,**kwargs):
+ """
+ Filter on facts without calling the minion directly; returns a
+ fresh Overlord clone with the criteria combined via the fact query's and_or().
+ """
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.and_or(*args,**kwargs)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+ def or_or(self,*args,**kwargs):
+ """
+ Filter on facts without calling the minion directly; returns a
+ fresh Overlord clone with the criteria combined via the fact query's or_or().
+ """
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.or_or(*args,**kwargs)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+
+ def or_and(self,*args,**kwargs):
+ """
+ Filter on facts without calling the minion directly; returns a
+ fresh Overlord clone with the criteria combined via the fact query's or_and().
+ """
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.or_and(*args,**kwargs)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+ def set_complexq(self,q_object,connector=None):
+ #create a fresh overlord
+ fresh_overlord = self._clone()
+ fresh_overlord.overlord_query.fact_query = self.overlord_query.fact_query.set_compexq(q_object,connector)
+
+ #give back the fresh reference
+ return fresh_overlord
+
+ def _clone(self,klass=None):
+ """
+ Used by the query methods above: when querying it is important to
+ return a fresh Overlord object instead of mutating this one, so a
+ single instance can spawn many temporary filtered copies.
+ """
+ from copy import copy
+ if klass is None:
+ klass = self.__class__
+
+ #create a fresh copy
+ c = klass(copy(self.server_spec),
+ port=copy(self.port),
+ verbose=copy(self.verbose),
+ interactive=copy(self.interactive),
+ noglobs = copy(self.noglobs),
+ nforks = copy(self.nforks),
+ async = copy(self.async),
+ delegate=copy(self.delegate),
+ mapfile = copy(self.mapfile),
+ config = copy(self.config)
+ )
+ c.timeout = copy(self.timeout)
+ c.cm_config = copy(self.cm_config)
+ return c
class Client(Overlord):
def __init__(self, *args, **kwargs):
Overlord.__init__(self, *args, **kwargs)
- # provided for backward compatibility only
+ # provided for backward compatibility only
|
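Note on the client.py changes above: the main addition is the fact-query plumbing on Overlord -- filter()/filter_or() and the and_*/or_* helpers clone the Overlord, attach a query to overlord_query.fact_query, and any subsequent module call then ships that query ahead of its arguments (the '__fact__' convention handled in server.py). A hedged usage sketch; the keyword passed to filter() is a placeholder fact name, and the real syntax depends on which facts modules the minions have loaded:

    import func.overlord.client as fc

    overlord = fc.Overlord("*")

    # filter() returns a fresh clone with the query attached; the original
    # overlord is left untouched and can be reused unfiltered.
    filtered = overlord.filter(some_fact__contains="web")   # placeholder keyword
    print filtered.command.run("uptime")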
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/call.py
^
|
@@ -30,6 +30,130 @@
class CallConfig(BaseConfig):
force_async = ListOption('')
+#Facts parser and utility class
+
+class FactsCommand(object):
+ """
+ This class takes the fact parameters entered on the command line and
+ parses and converts them into usable Overlord fact arguments.
+ """
+
+ __valid_operators = {
+ "=":"",
+ "<":"lt",
+ ">":"gt",
+ }
+ __valid_c_operators={
+
+ "<=":"lte",
+ ">=":"gte"
+ }
+
+ __valid_keywords = {
+ "in":"contains",
+ "ini":"icontains",
+ }
+
+ def do(self,filter,filteror,*args,**kwargs ):
+ """
+ The action part: parse the --filter/--filteror value (exactly one must be given) and return the parsed fact dict, or False on error
+ """
+ if not filter and not filteror:
+ return False
+ elif filter and filteror:
+ return False
+
+ tmp_arg = filter or filteror
+ parse_result = self.__parse_fact_args(tmp_arg)
+ if not parse_result:
+ return False
+ return parse_result
+
+
+ def __parse_fact_args(self,args):
+ """
+ Parses arguments given in the form
+ keyword=value,keyword<value,value in keyword
+ """
+ comma_separated = args.split(",")
+ if not self.__is_coma_ok(comma_separated):
+ return False
+
+ final_dict = {}
+
+ for com_key in comma_separated:
+ res = self.__convert_keyword(com_key)
+ if not res:
+ #self.outputUsage()
+ return False
+
+ final_dict[res[0]]=res[1]
+
+ return final_dict
+
+
+ def __is_coma_ok(self,comma_list):
+ """
+ Checks whether the comma-separated expression is well formed
+ """
+ for c in comma_list:
+ if not c:
+ #self.outputUsage()
+ return False
+ return True
+
+ def __convert_keyword(self,keyword):
+ """
+ Converts a keyword expression into a ready-to-use Overlord parameter
+ """
+ keyword = keyword.strip()
+
+ #check for space first
+ if keyword.find(" ")!=-1:
+ #do the keyword operations first
+ tmp_kw = keyword.split()
+ return self.__join_keyword(tmp_kw,self.__valid_keywords)
+
+ else:
+ for op in self.__valid_c_operators:
+ if keyword.find(op)!=-1:
+ tmp_kw = keyword.split(op)
+ return self.__join_keyword(tmp_kw,self.__valid_c_operators,op)
+ #do the operator things
+ for op in self.__valid_operators:
+ if keyword.find(op)!=-1:
+ tmp_kw = keyword.split(op)
+ return self.__join_keyword(tmp_kw,self.__valid_operators,op)
+ return False
+
+ def __join_keyword(self,tmp_kw,valid_set,operator=None):
+ """
+ Common helper that joins a parsed expression into a (field__op, value) pair
+ """
+ if not operator:
+ if not len(tmp_kw) == 3:
+ return False
+ else:
+ value = tmp_kw[2]
+ operator = tmp_kw[1]
+
+ else:
+ if not len(tmp_kw) == 2:
+ return False
+ else:
+ value = tmp_kw[1]
+
+
+ #strip the operator before the lookup so surrounding whitespace does not hide a valid one
+ if not operator.strip() in valid_set.keys():
+ return False
+
+ if operator in self.__valid_keywords.keys():
+ return "".join([value.strip(),"__",valid_set[operator.strip()]]),tmp_kw[0].strip()
+ else:
+ return "".join([tmp_kw[0].strip(),"__",valid_set[operator.strip()]]),value.strip()
+
+
class Call(base_command.BaseCommand):
name = "call"
usage = "call module method name arg1 arg2..."
@@ -50,6 +174,9 @@
self.parser.add_option("-p", "--pickle", dest="pickle",
help="output return data in python pickle format",
action="store_true")
+ self.parser.add_option("-b", "--basic", dest="basic",
+ help="output return data with minimal, basic formating",
+ action="store_true")
self.parser.add_option("-f", "--forks", dest="forks",
help="how many parallel processes? (default 1)",
default=self.forks)
@@ -70,6 +197,22 @@
help="use delegation to make function call",
default=self.delegate,
action="store_true")
+ self.parser.add_option('-l', '--logpoll', dest="logpoll",
+ help="Polls for that call for minion side to get some useful output info.",
+ action="store_true")
+ self.parser.add_option('-o', '--logone', dest="logone",
+ help="Polls for that call for minion side to get some useful output info,for only one host,must suply job_id;host as parameter",
+ action="store")
+
+ self.parser.add_option('-r', '--progress', dest="progress",
+ help="Polls for that call for minion side to get the progress.",
+ action="store")
+ self.parser.add_option("", "--filter", dest="filter",
+ help="use filter to and minion facts",
+ action="store")
+ self.parser.add_option("", "--filteror", dest="filteror",
+ help="use filteror to or minion facts",
+ action="store")
def handleOptions(self, options):
self.options = options
@@ -82,13 +225,13 @@
self.argv = argv
return base_command.BaseCommand.parse(self, argv)
-
+
def format_return(self, data):
"""
The call module supports multiple output return types, the default is pprint.
"""
-
+
if self.options.xmlrpc:
return xmlrpclib.dumps((data,""))
@@ -107,6 +250,10 @@
import pickle
return pickle.dumps(data)
+ if self.options.basic:
+ output = str(data)
+ return output
+
return pprint.pformat(data)
def do(self, args):
@@ -120,7 +267,7 @@
if not args:
self.outputUsage()
return
-
+
self.module = args[0]
if len(args) > 1:
self.method = args[1]
@@ -145,27 +292,60 @@
self.async = self.options.async
self.forks = self.options.forks
self.delegate = self.options.delegate
-
+
self.server_spec = self.parentCommand.server_spec
+ #is the exclude option active?
+ self.exclude_spec = self.parentCommand.exclude_spec
+
self.getOverlord()
+ #the facts part inserted here
+ if self.options.filter or self.options.filteror:
+ facts = FactsCommand()
+ result_fact = facts.do(self.options.filter,self.options.filteror)
+ if not result_fact:
+ self.outputUsage()
+ return
+
+ if self.options.filter:
+ #print "The result facts are : ",result_fact
+ self.overlord_obj=self.overlord_obj.filter(**result_fact)
+ elif self.options.filteror:
+ self.overlord_obj=self.overlord_obj.filter_or(**result_fact)
+
+
+ #end of the facts parsing
+
if not self.options.jobstatus:
- results = self.overlord_obj.run(self.module, self.method, self.method_args)
+ if self.options.filter or self.options.filteror:
+ results = self.overlord_obj.run(self.module, self.method,[{'__fact__':self.overlord_obj.overlord_query.serialize_query()}]+list(self.method_args))
+ else:
+ results = self.overlord_obj.run(self.module, self.method, self.method_args)
else:
(return_code, async_results) = self.overlord_obj.job_status(self.module)
res = self.format_return((return_code, async_results))
print res
return async_results
- if self.options.async:
+ #log for only one machine, which is more reasonable
+ #than doing it for thousands ...
+ if self.options.logone:
+ self._poll_logs(self.module,self.options.logone)
+ return #terminate no need for more
+
+ if self.options.progress:
+ self._print_progress(self.module,self.options.progress)
+ return #terminate no need for more
+
+ if self.options.async and not self.options.delegate:
self.partial = {}
if self.options.nopoll:
print "JOB_ID:", pprint.pformat(results)
return results
else:
-
+ if self.options.logpoll:
+ self._poll_logs(results)
return self.overlord_obj.local.utils.async_poll(results, self.print_results)
-
# dump the return code stuff atm till we figure out the right place for it
foo = self.format_return(results)
print foo
@@ -176,3 +356,115 @@
def print_results(self, res):
for i in res.iteritems():
print self.format_return(i)
+
+ #do it only for some of the hosts if needed !
+ def _poll_logs(self,job_id,host=None):
+ """
+ Polls the minion side for log output and prints
+ it to the screen, so users and other applications
+ can follow what the call is doing
+ """
+ import time
+ from func.minion.modules.jobs import NUM_OF_LINES
+ #NUM_OF_LINES tells us after how many identical
+ # log lines we accept that the rest of the log is
+ #unchanged and stop comparing
+ print_result = {}
+ to_print = {}
+ poll_res = (None,False)#initial state
+ print_first_time = True
+ while not poll_res[1]:#while the job_id is not finished
+ if print_first_time and host:
+ poll_res = self.overlord_obj.tail_log(job_id,host,True)
+ else:
+ poll_res = self.overlord_obj.tail_log(job_id,host)
+
+ if not poll_res[0]:
+ print "Logging data is initializing ..."
+ time.sleep(0.5)
+ poll_res = self.overlord_obj.tail_log(job_id,host)
+ continue
+
+ #print the stuff you collected
+ for minion,log in poll_res[0].iteritems():
+ log = self._convert_log_to_list(log)
+
+ if not print_result.has_key(minion):
+ print_result[minion]=log
+ to_print[minion]=log
+
+ else:
+ #print "---------------------------------------------"
+ #print "PRINT_RESULT : ",print_result[minion]
+ #print "LOG IS ",log
+
+ to_print[minion]=list(set(log).difference(set(print_result[minion])))
+ print_result[minion]=list(set(print_result[minion]).union(set(to_print[minion])))
+ #should empty the buffer a little bit
+ #think if you have a file which is 1 GB :)
+ #print_result[minion] = print_result[minion][-NUM_OF_LINES:]
+ #print "PRINT_RESULT : ",print_result[minion]
+
+ #print "to_print ",to_print
+ #print "---------------------------------------------"
+ #raw_input()
+
+ self._print_dict_result(to_print,print_first_time)
+ if print_first_time and host:
+ print_first_time = False
+
+ time.sleep(0.5)
+
+ def _print_progress(self,job_id,host):
+ """
+ Gets the progress for job_id and host
+ """
+ import time
+ from func.utils import ProgressBar,TerminalController
+
+ poll_res = (None,False)#initial state
+ first_time = True
+ while not poll_res[1]:#while the job_id is not finished
+ poll_res = self.overlord_obj.check_progress(job_id,host)
+ #print poll_res
+ if not poll_res[0]:
+ time.sleep(0.5)
+ continue
+
+ if first_time:
+
+ term = TerminalController()
+ progress = ProgressBar(term, 'Progress Status',minValue=poll_res[0][host][0],maxValue=poll_res[0][host][1])
+ first_time = False
+
+ #update the progress bar
+ progress.update(poll_res[0][host][0])
+ #sleep a little bit
+ time.sleep(0.5)
+
+ if first_time:
+ print "Method has no progress ability or some remote error occured"
+ else:
+ #clear the progress bar and say it is done
+ progress.clear()
+ print "JOB FINISHED : ",job_id
+
+
+ def _print_dict_result(self,result,print_host=True):
+ """
+ A utility method that prints the entries
+ of a result dictionary ...
+ """
+ for minion,logs in result.iteritems():
+ if logs:
+ if print_host:
+ print "------HOST : %s -------"%minion
+ print "\n".join(logs)
+
+
+ def _convert_log_to_list(self,log):
+ res = []
+ for l in log:
+ if l:
+ res.extend(l.split("\n"))
+ return res
|
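For orientation, a small sketch of what the FactsCommand parser added above produces; the filter string is illustrative, and the expected dictionary follows the operator tables in this hunk:

    # illustrative only -- exercises the --filter parsing shown above
    from func.overlord.cmd_modules.call import FactsCommand

    facts = FactsCommand()
    result = facts.do("cpu_count>2,memtotal>=1024", None)
    # expected: {'cpu_count__gt': '2', 'memtotal__gte': '1024'}
    print result

call.py then feeds this dictionary to Overlord.filter() (or filter_or() when --filteror is used) as keyword arguments.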
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/check.py
^
|
@@ -20,12 +20,14 @@
from func.overlord import base_command
from certmaster import utils
+from func import utils as func_utils
from func.minion import sub_process
from certmaster.config import read_config
from certmaster.commonconfig import MinionConfig
from func.commonconfig import FuncdConfig
+
class CheckAction(base_command.BaseCommand):
name = "check"
usage = "check func for possible setup problems"
@@ -47,58 +49,58 @@
self.minion_config = read_config('/etc/certmaster/minion.conf', MinionConfig)
self.funcd_config = read_config('/etc/func/minion.conf', FuncdConfig)
-
+
if not self.check_certmaster and not self.check_minion:
- print "* specify --certmaster, --minion, or both"
- return
+ print "* specify --certmaster, --minion, or both"
+ return
else:
- print "SCAN RESULTS:"
+ print "SCAN RESULTS:"
- hostname = utils.get_hostname()
+ hostname = func_utils.get_hostname_by_route()
print "* FQDN is detected as %s, verify that is correct" % hostname
self.check_iptables()
if not os.getuid() == 0:
- print "* root is required to run these setup tests"
- return
+ print "* root is required to run these setup tests"
+ return
if self.check_minion:
- # check that funcd is running
- self.check_service("funcd")
+ # check that funcd is running
+ self.check_service("funcd")
+
+ # check that the configured certmaster is reachable
+ self.check_talk_to_certmaster()
- # check that the configured certmaster is reachable
- self.check_talk_to_certmaster()
-
if self.check_certmaster:
- # check that certmasterd is running
- self.check_service("certmasterd")
+ # check that certmasterd is running
+ self.check_service("certmasterd")
- # see if we have any waiting CSRs
- # FIXME: TODO
+ # see if we have any waiting CSRs
+ # FIXME: TODO
- # see if we have signed any certs
- # FIXME: TODO
+ # see if we have signed any certs
+ # FIXME: TODO
- self.server_spec = self.parentCommand.server_spec
- self.getOverlord()
-
- results = self.overlord_obj.test.add(1,2)
- hosts = results.keys()
- if len(hosts) == 0:
- print "* no systems have signed certs"
- else:
- failed = 0
- for x in hosts:
- if results[x] != 3:
- failed = failed+1
- if failed != 0:
- print "* unable to connect to %s registered minions from overlord" % failed
- print "* run func '*' ping to check status"
+ self.server_spec = self.parentCommand.server_spec
+ self.getOverlord()
+
+ results = self.overlord_obj.test.add(1,2)
+ hosts = results.keys()
+ if len(hosts) == 0:
+ print "* no systems have signed certs"
+ else:
+ failed = 0
+ for x in hosts:
+ if results[x] != 3:
+ failed = failed+1
+ if failed != 0:
+ print "* unable to connect to %s registered minions from overlord" % failed
+ print "* run func '*' ping to check status"
- # see if any of our certs have expired
+ # see if any of our certs have expired
# warn about iptables if running
print "End of Report."
@@ -112,18 +114,18 @@
def check_iptables(self):
if os.path.exists("/etc/rc.d/init.d/iptables"):
rc = sub_process.call("/sbin/service iptables status >/dev/null 2>/dev/null", shell=True)
-
+
if rc == 0:
- # FIXME: don't hardcode port
- print "* iptables may be running"
- print "Insure that port %s is open for minions to connect to certmaster" % self.minion_config.certmaster_port
- print "Insure that port %s is open for overlord to connect to minions" % self.funcd_config.listen_port
+ # FIXME: don't hardcode port
+ print "* iptables may be running"
+ print "Insure that port %s is open for minions to connect to certmaster" % self.minion_config.certmaster_port
+ print "Insure that port %s is open for overlord to connect to minions" % self.funcd_config.listen_port
def check_talk_to_certmaster(self):
# FIXME: don't hardcode port
master_uri = "http://%s:%s/" % (self.minion_config.certmaster, self.minion_config.certmaster_port)
print "* this minion is configured in /etc/certmaster/minion.conf"
- print " to talk to host '%s' on port %s for certs, verify that is correct" % (self.minion_config.certmaster,
+ print " to talk to host '%s' on port %s for certs, verify that is correct" % (self.minion_config.certmaster,
self.minion_config.certmaster_port)
# this will be a 501, unsupported GET, but we should be
# able to tell if we can make contact
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/copyfile.py
^
|
@@ -46,7 +46,7 @@
if not self.options.filename or not self.options.remotepath:
self.outputUsage()
return
-
+
self.server_spec = self.parentCommand.server_spec
self.getOverlord()
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/cmd_modules/dumpconfig.py
^
|
@@ -0,0 +1,46 @@
+"""
+Dump func-client/overlord config information
+
+Copyright 2011, Red Hat, Inc
+see AUTHORS
+
+This software may be freely redistributed under the terms of the GNU
+general public license.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+
+
+import optparse
+import os
+
+from func.overlord import base_command
+from certmaster import certs
+
+class DumpConfig(base_command.BaseCommand):
+ name = "dump_config"
+ usage = "dump func-client/overlord config"
+ summary = usage
+
+ def do(self, args):
+ self.server_spec = self.parentCommand.server_spec
+ self.getOverlord()
+ print 'config:'
+ for l in str(self.overlord_obj.config).split('\n'):
+ print '\t' + l
+ print ''
+ print 'key file: %s' % self.overlord_obj.key
+ cert = certs.retrieve_cert_from_file(self.overlord_obj.cert)
+ print 'cert file: %s' % self.overlord_obj.cert
+ print 'ca file: %s' % self.overlord_obj.ca
+ print 'cert dn: %s' % cert.get_subject().CN
+ print 'certificate hash: %s' % cert.subject_name_hash()
+ print 'timeout: %s' % self.overlord_obj.timeout
+ print 'forks: %s' % self.overlord_obj.nforks
+ print 'cmd modules loaded:'
+ for mn in sorted(self.overlord_obj.methods.keys()):
+ print '\t' + mn
+ print 'minion map:'
+ print self.overlord_obj.minionmap
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/cmd_modules/grep.py
^
|
@@ -0,0 +1,152 @@
+"""
+grep utility
+
+Copyright 2007, Red Hat, Inc
+see AUTHORS
+
+This software may be freely redistributed under the terms of the GNU
+general public license.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+
+
+import pprint
+import sys
+import types
+
+from func.overlord import client
+from func.overlord import base_command
+
+class Grep(base_command.BaseCommand):
+ name = "grep"
+ usage = "grep [--modules = 'module1,module2'] search_term"
+ summary = "Greps for some keyword in modules useful for troubleshooting"
+
+ def addOptions(self):
+ """
+ Add options for grep utility ...
+ """
+ self.parser.add_option("-v", "--verbose", dest="verbose",
+ default=self.verbose,
+ action="store_true")
+ self.parser.add_option("-a", "--async", dest="async",
+ help="Use async calls? (default 0)",
+ default=self.async,
+ action="store_true")
+ self.parser.add_option('-d', '--delegate', dest="delegate",
+ help="use delegation to make function call",
+ default=self.delegate,
+ action="store_true")
+ self.parser.add_option('-m', '--modules', dest="modules",
+ help="modules to be searched",
+ default=[],
+ action="append")
+
+
+ def handleOptions(self, options):
+ self.options = options
+ self.verbose = options.verbose
+
+ # I'm not really a fan of the "module methodname" approach
+ # but we'll keep it for now -akl
+
+ def parse(self, argv):
+ self.argv = argv
+ return base_command.BaseCommand.parse(self, argv)
+
+
+
+ def do(self, args):
+ """
+ The actual grep work happens here
+ """
+
+ if not args:
+ self.outputUsage()
+ return
+
+ #the search keyword
+ self.word = args[0]
+
+ self.async = self.options.async
+ self.delegate = self.options.delegate
+
+ self.server_spec = self.parentCommand.server_spec
+ self.getOverlord()
+
+ host_modules = self._get_host_grep_modules(self.server_spec)
+ results = {}
+
+ # We could do this across hosts or across modules. Not sure
+ # which is better, this is across hosts (aka, walk across the
+ # hosts, then ask all the module.grep methods to it, then on to
+ # next host
+
+ existent_minions_class = self.overlord_obj.minions_class # keep a copy
+
+ for host in host_modules.keys():
+ host_only_mc = self.overlord_obj._mc(host, noglobs=True)
+ host_only_mc.get_all_hosts()
+ self.overlord_obj.minions_class = host_only_mc
+ for module in host_modules[host]:
+ if self.options.modules and module in self.options.modules:
+ if self.options.verbose:
+ print "Scanning module: %s on host: %s" %(module, host)
+
+ tmp_res = self.overlord_obj.run(module,"grep",[self.word])
+
+ if self.options.async:
+ tmp_res = self.overlord_obj.local.utils.async_poll(tmp_res,None)
+ #FIXME: I'm not sure what the best format for this is...
+ if tmp_res[host]:
+ print "%s: %s" % (host, pprint.pformat(tmp_res[host]))
+
+ self.overlord_obj.minions_class = existent_minions_class # put it back
+
+ def _get_host_grep_modules(self, server_spec):
+ """
+ When the user does not supply a module list, every module is
+ considered selected; returns a mapping of host to the modules
+ on that host which expose a grep method
+ """
+
+ #instead of getting all of the modules we assume
+ #that all machines have the same modules ...
+
+
+ host_modules = {}
+ #FIXME: we need to change this to create a dict of hostname->modules
+ # so we only call module.grep on systems that report it. things like
+ # virt/hardware aren't available on all guests
+
+ if not hasattr(self, 'overlord_obj'):
+ self.getOverlord()
+
+ hosts = self.overlord_obj.minions_class.get_all_hosts()
+ existent_minions_class = self.overlord_obj.minions_class # keep a copy
+ if not hosts:
+ raise Exception("No minions on system!")
+
+
+ for host in hosts:
+ host_only_mc = self.overlord_obj._mc(host, noglobs=True)
+ self.overlord_obj.minions_class = host_only_mc
+ module_methods = self.overlord_obj.system.inventory()
+
+ for hn in module_methods:
+ if type(module_methods[hn]) != types.DictType:
+ sys.stderr.write("Error on host %s: %s" % (hn, ' '.join(module_methods[hn])))
+ continue
+
+ for module in module_methods[hn]:
+ # searching for "grep"? meta
+ if "grep" in module_methods[hn][module]:
+ if not host_modules.has_key(host):
+ host_modules[host] = []
+ host_modules[host].append(module)
+
+ self.overlord_obj.minions_class = existent_minions_class # put it back
+ return host_modules
|
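As the inventory scan above implies, only modules whose inventory lists a grep method are searched. A minimal, hypothetical sketch of a minion module opting in; the module name and the data it searches are made up, and the base-class layout follows the usual func minion-module convention:

    # hypothetical minion module -- illustrative only
    import func_module

    class Example(func_module.FuncModule):
        description = "Illustrative module searchable via the grep command"

        def grep(self, word):
            # a real module would search its own state; this data is invented
            facts = {"release": "Fedora 11", "kernel": "2.6.29"}
            return [v for k, v in facts.iteritems() if word in k or word in v]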
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/cmd_modules/group.py
^
|
@@ -0,0 +1,227 @@
+"""
+group func method invoker
+
+Copyright 2007, Red Hat, Inc
+see AUTHORS
+
+This software may be freely redistributed under the terms of the GNU
+general public license.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+"""
+
+from func.overlord.groups import Groups
+from func.overlord import base_command
+from certmaster.config import read_config, BaseConfig, ListOption
+
+
+class Group(base_command.BaseCommand):
+ name = "group"
+ usage = "group [--add_group] [--remove_group] [--list_group] [--list_all] [--add_host] [--remove_host] [--list_hosts] {--exclude}"
+ summary = "allows a specific module and method to be called"
+ def addOptions(self):
+
+ self.parser.add_option("-v", "--verbose", dest="verbose",
+ default=self.verbose,
+ action="store_true")
+
+ self.parser.add_option("--ag", "--add_group",
+ dest="add_group",
+ action="store_true")
+
+ self.parser.add_option("--lg", "--list_group",
+ dest="list_group",
+ action="store_true")
+
+
+ self.parser.add_option("--rg", "--remove_group",
+ dest="remove_group",
+ action="store_true")
+
+ self.parser.add_option("--la", "--list_all",
+ dest="list_all",
+ action="store_true")
+
+ self.parser.add_option("--ah", "--add_host",
+ dest="add_host",
+ action="store_true")
+
+ self.parser.add_option("--rh", "--remove_host",
+ dest="remove_host",
+ action="store_true")
+
+ self.parser.add_option("--lh", "--list_hosts",
+ dest="list_hosts",
+ action="store_true")
+
+ self.parser.add_option("--e", "--exclude",
+ dest="exclude",
+ action="store",
+ type="string")
+
+
+ def handleOptions(self, options):
+ self.options = options
+ self.verbose = options.verbose
+
+ # I'm not really a fan of the "module methodname" approach
+ # but we'll keep it for now -akl
+
+ def parse(self, argv):
+ self.argv = argv
+
+ return base_command.BaseCommand.parse(self, argv)
+
+
+ def do(self, args):
+
+ #create a group object
+ #it will get the internals from other places :)
+ self.group = Groups()
+
+ #--list_all does not need any args
+ if self.options.list_all:
+ self.group.show()
+
+ else:
+ #here we need to have args[0]
+ if not args:
+ self.outputUsage()
+ return
+
+ if self.options.add_group:
+ self._add_gr(args[0])
+
+ elif self.options.remove_group:
+ self._rm_gr(args[0])
+
+ elif self.options.list_group:
+ self._ls_gr(args[0])
+
+ elif self.options.add_host:
+ self._add_host(args[0])
+
+ elif self.options.remove_host:
+ self._rm_host(args[0])
+
+ elif self.options.list_hosts:
+ self._ls_host(args[0])
+
+ else:
+ #no valid usage
+ self.outputUsage()
+
+ def _add_gr(self,args):
+ """
+ Add each group named in args (a list of group names)
+ """
+ args = self._parse_args_list(args)
+ for arg in args:
+ res = self.group.add_group(arg,save=True)
+ if not res[0]:
+ print res[1]
+
+
+ def _rm_gr(self,args):
+ args = self._parse_args_list(args)
+ for arg in args:
+ self.group.remove_group_glob(arg)
+
+ def _ls_gr(self,args):
+ args = self._parse_args_list(args)
+ print "GROUPS : "
+ for arg in args:
+ res=self.group.get_groups_glob(arg)
+ if res :
+ print "\t ",res
+
+
+ def _add_host(self,args):
+ args = self._parse_args_list(args)
+ args = self._match_group_host(args)
+ if self.options.exclude:
+ exclude = self._parse_args_list(self.options.exclude)
+ exclude = self._match_group_host(exclude)
+
+ for g_i,g_e in zip(args.iteritems(),exclude.iteritems()):
+ for host_include,host_exclude in zip(g_i[1],g_e[1]):
+ self.group.add_hosts_to_group_glob(g_i[0][1:],host_include,exclude_string=host_exclude)
+ #user did not enter the exclude option, so go on normally
+ else:
+ for group,hosts in args.iteritems():
+ for h in hosts:
+ #adding here
+ #sometimes we may get a format like this:
+ #@group1:one,two,three
+ if h.find(",") != -1:
+ for sub_host in h.split(","):
+ self.group.add_hosts_to_group_glob(group[1:],sub_host)
+ else:
+ self.group.add_hosts_to_group_glob(group[1:],h)
+
+
+
+ def _rm_host(self,args):
+ args = self._parse_args_list(args)
+ args = self._match_group_host(args)
+ if self.options.exclude:
+ exclude = self._parse_args_list(self.options.exclude)
+ exclude = self._match_group_host(exclude)
+ for g_i,g_e in zip(args.iteritems(),exclude.iteritems()):
+ for host_include,host_exclude in zip(g_i[1],g_e[1]):
+ self.group.remove_host_glob(g_i[0][1:],host_include,exclude_string=host_exclude)
+ #user did not enter the exclude option, so go on normally
+ else:
+ for group,hosts in args.iteritems():
+ for h in hosts:
+ #adding here
+ if h.find(",") != -1:
+ for sub_host in h.split(","):
+ self.group.remove_host_glob(group[1:],sub_host)
+ else:
+ self.group.remove_host_glob(group[1:],h)
+
+
+ def _ls_host(self,args):
+ if self.options.exclude:
+ print self.group.get_hosts_glob(args,exclude_string=self.options.exclude)
+ else:
+ print self.group.get_hosts_glob(args)
+
+ def _parse_args_list(self,args):
+ """
+ Arguments may be separated by ';' or ',';
+ convert them into a list
+ """
+ if args.find(";")!=-1:
+ return args.split(";")
+ elif args.find(",")!=-1:
+ return args.split(";")
+ return [args]
+
+ def _match_group_host(self,args):
+ """
+ Returns a dictionary for
+ {group:hosts}
+ """
+ groups = {}
+ for arg in args :
+ if arg.find(":")!=-1:
+ group,host = arg.split(":")
+ if groups.has_key(group):
+ if not host in groups[group]:
+ groups[group].append(host)
+ else:
+ groups[group]=[]
+ groups[group].append(host)
+ else:
+ group = arg
+ if groups.has_key(group):
+ if not "*" in groups[group]:
+ groups[group].append("*")
+ else:
+ groups[group]=[]
+ groups[group].append("*")
+ return groups
|
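The options above are thin wrappers around the Groups API defined in func/overlord/groups.py later in this diff. A brief illustrative sketch; the group and host names are made up:

    # illustrative sketch of the underlying Groups API
    from func.overlord.groups import Groups

    g = Groups()                                     # backend chosen from the overlord config
    g.add_group("webservers")                        # what --ag does for each group name
    g.add_hosts_to_group_glob("webservers", "www*")  # what --ah does for "@webservers:www*"
    print g.get_hosts(group="webservers")            # list the member hosts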
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/listminions.py
^
|
@@ -33,14 +33,13 @@
if options.verbose:
self.verbose = self.options.verbose
-
+
def do(self, args):
self.server_spec = self.parentCommand.server_spec
-
- minion_set = client.Minions(self.server_spec, port=self.port)
+ self.getOverlord()
+ minion_set = self.overlord_obj.minions_class
servers = minion_set.get_all_hosts()
servers.sort()
for server in servers:
print server
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/ping.py
^
|
@@ -50,14 +50,17 @@
# to make things look more speedy.
minion_set = client.Minions(self.server_spec, port=self.port)
+ if minion_set.overlord_config.puppet_minions:
+ minion_set = client.PuppetMinions(self.server_spec, port=self.port)
servers = minion_set.get_all_hosts()
for server in servers:
+ if server in minion_set.downed_hosts:
+ continue
- overlord_obj = client.Overlord(server,port=self.port,
+ overlord_obj = client.Overlord(server,
interactive=False,
verbose=self.verbose,
- config=self.config,
noglobs=True)
results = overlord_obj.run("test", "ping", [])
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/show.py
^
|
@@ -28,21 +28,41 @@
summary = usage
subCommandClasses = [show_hardware.ShowHardware]
+ socket_timeout = None
+ exclude_spec = None
+ conffile = None
+
def addOptions(self):
self.parser.add_option("-v", "--verbose", dest="verbose",
action="store_true")
+ self.parser.add_option('-t', '--timeout', dest="timeout", type="float",
+ help="Set default socket timeout in seconds")
+ self.parser.add_option('-e', '--exclude', dest="exclude",
+ help="exclude some of minions",
+ action="store",
+ type="string")
+ self.parser.add_option('-c', '--conf', dest="conffile",
+ help="specify an overlord.conf file for func to use")
def handleOptions(self, options):
self.options = options
self.verbose = options.verbose
+ if options.timeout:
+ self.socket_timeout = options.timeout
+
+ if options.exclude:
+ self.exclude_spec = options.exclude
+
+ if options.conffile:
+ self.conffile = options.conffile
def parse(self, argv):
self.argv = argv
return base_command.BaseCommand.parse(self, argv)
-
+
def do(self, args):
pass
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/cmd_modules/show_hardware.py
^
|
@@ -30,11 +30,11 @@
def addOptions(self):
self.parser.add_option("-v", "--verbose", dest="verbose",
action="store_true")
-
+
def handleOptions(self, options):
pass
-
+
def parse(self, argv):
self.argv = argv
return base_command.BaseCommand.parse(self,argv)
@@ -43,12 +43,12 @@
self.server_spec = self.parentCommand.parentCommand.server_spec
self.getOverlord()
-
+
results = self.overlord_obj.run("hardware", "info", [])
- # if the user
+ # if the user
top_options = ["port","verbose"]
-
+
for minion in results:
print "%s:" % minion
minion_data = results[minion]
@@ -56,9 +56,7 @@
if not args:
pprint.pprint(minion_data)
continue
-
+
for arg in args:
if arg in minion_data:
print minion_data[arg]
-
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/command.py
^
|
@@ -14,9 +14,6 @@
import optparse
import sys
-from certmaster.config import read_config, CONFIG_FILE
-
-from certmaster.commonconfig import CMConfig
class CommandHelpFormatter(optparse.IndentedHelpFormatter):
"""
@@ -56,7 +53,11 @@
ret += "\n" + "\n".join(commandDesc) + "\n"
return ret
-class CommandOptionParser(optparse.OptionParser):
+class FuncOptionParser(optparse.OptionParser):
+ def get_version(self):
+ return file("/etc/func/version").read().strip()
+
+class CommandOptionParser(FuncOptionParser):
"""
I parse options as usual, but I explicitly allow setting stdout
so that our print_help() method (invoked by default with -h/--help)
@@ -70,6 +71,7 @@
# we're overriding the built-in file, but we need to since this is
# the signature from the base class
__pychecker__ = 'no-shadowbuiltin'
+
def print_help(self, file=None):
# we are overriding a parent method so we can't do anything about file
__pychecker__ = 'no-shadowbuiltin'
@@ -77,6 +79,10 @@
file = self._stdout
file.write(self.format_help())
+ def get_version(self):
+ return file("/etc/func/version").read().strip()
+
+
class Command:
"""
I am a class that handles a command for a program.
@@ -116,7 +122,6 @@
self.stderr = stderr
self.parentCommand = parentCommand
- self.config = read_config(CONFIG_FILE, CMConfig)
# create subcommands if we have them
self.subCommands = {}
@@ -162,6 +167,7 @@
description = self.description or self.summary
self.parser = CommandOptionParser(
usage=usage, description=description,
+ version=True,
formatter=formatter)
self.parser.set_stdout(self.stdout)
self.parser.disable_interspersed_args()
@@ -218,7 +224,6 @@
# command
args = [args[1], args[0]]
-
# if we have args that we need to deal with, do it now
# before we start looking for subcommands
self.handleArguments(args)
@@ -268,6 +273,7 @@
"""
self.parser.print_usage(file=self.stderr)
+
def handleOptions(self, options):
"""
Handle the parsed options.
@@ -288,4 +294,3 @@
while c.parentCommand:
c = c.parentCommand
return c
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/delegation_tools.py
^
|
@@ -15,14 +15,15 @@
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
+from func import utils as func_utils
import fnmatch
class groupby(object):
"""
- Borrowing the groupby iterator class directly
+ Borrowing the groupby iterator class directly
from the Python API as it does not exist in Pythons < 2.4
"""
-
+
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
@@ -31,7 +32,7 @@
self.tgtkey = self.currkey = self.currvalue = xrange(0)
def __iter__(self):
return self
- def next(self):
+ def next(self):
while self.currkey == self.tgtkey:
self.currvalue = self.it.next() # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
@@ -45,32 +46,34 @@
def group_paths(ungrouped_list):
"""
- Given a list of multi-element path lists,
+ Given a list of multi-element path lists,
groups them together into a list of single-element paths (which
exist directly under the current overlord) and a dictionary of paths
to send to next hops in the delegation chain, containing a list of lists
keyed by their common next hop.
"""
-
+
single_paths = [path[0] for path in ungrouped_list if len(path) == 1]
non_single_paths = [path for path in ungrouped_list if len(path) > 1]
- path_group = dict([(key,[path[1:len(path)] for path in list(gen)])
+ path_group = dict([(key,[path[1:len(path)] for path in list(gen)])
for key, gen in groupby(non_single_paths,
key=lambda x:x[0])])
-
+
return (single_paths,path_group)
-
-def get_paths_for_glob(glob, minionmap):
+
+def get_paths_for_glob(glob_list, minionmap):
"""
Given a glob, returns shortest path to all minions
matching it in the delegation dictionary tree
"""
-
+
pathlist = []
- for elem in match_glob_in_tree(glob,minionmap):
- result = get_shortest_path(elem,minionmap)
- if result not in pathlist: #prevents duplicates
- pathlist.append(result)
+ for glob in glob_list.split(";"):
+ for g in func_utils.get_all_host_aliases(glob):
+ for elem in match_glob_in_tree(g,minionmap):
+ result = get_shortest_path(elem,minionmap)
+ if result not in pathlist: #prevents duplicates
+ pathlist.append(result)
return pathlist
def list_all_minions(minionmap):
@@ -92,7 +95,7 @@
Flattens gnarly nested lists into much
nicer, flat lists
"""
-
+
flat_list = []
for item in bumpy_list:
if isinstance(item, list):
@@ -108,20 +111,20 @@
for all keys (minion FQDNs) matching the given
glob, returns matches
"""
-
+
matched = []
for k,v in minionmap.iteritems():
if fnmatch.fnmatch(k,pattern):
matched.append(k)
return matched
-
+
def match_glob_in_tree(pattern, minionmap):
"""
Searches through given tree dictionary for all
keys (minion FQDNs) matching the given glob,
returns matches
"""
-
+
matched = []
for k,v in minionmap.iteritems():
for result in match_glob_in_tree(pattern, v):
@@ -137,7 +140,7 @@
result denoting minion existence under your current
node
"""
-
+
return len(match_glob_on_toplevel(minion,minionmap)) > 0
def get_shortest_path(minion, minionmap):
@@ -146,12 +149,12 @@
this method returns all paths from the top
node to the minion in the form of a flat list
"""
-
+
def lensort(a,b):
if len(a) > len(b):
return 1
return -1
-
+
results = get_all_paths(minion,minionmap)
results.sort(lensort)
return results[0]
@@ -162,54 +165,54 @@
this method returns all paths that exist from the top
node to the minion in the delegation dictionary tree
"""
-
+
#This is an ugly kludge of franken-code. If someone with
#more knowledge of graph theory than myself can improve this
#module, please, please do so. - ssalevan 7/2/08
seq_list = []
-
+
if minion_exists_under_node(minion, minionmap):
return [[minion]] #minion found, terminate branch
-
+
if minionmap == {}:
return [[]] #no minion found, terminate branch
-
+
for k,v in minionmap.iteritems():
branch_list = []
branch_list.append(k)
-
+
for branchlet in get_all_paths(minion, v):
branch_list.append(branchlet)
-
+
single_branch = flatten_list(branch_list)
if minion in single_branch:
seq_list.append(single_branch)
-
+
return seq_list
-if __name__ == "__main__":
+if __name__ == "__main__":
mymap = {'anthony':{'longpath1':{'longpath2':{'longpath3':{}}}},
'phil':{'steve':{'longpath3':{}}},
'tony':{'mike':{'anthony':{}}},
'just_a_minion':{}
}
-
+
print "- Testing an element that exists in multiple lists of varying length:"
- for elem in match_glob_in_tree('*path3',mymap):
+ for elem in match_glob_in_tree('*path3',mymap):
print "Element: %s, all paths: %s" % (elem, get_all_paths(elem,mymap))
print "best path: %s" % get_shortest_path(elem, mymap)
-
+
print "- Testing an element that is simply a minion and has no sub-nodes:"
for elem in match_glob_in_tree('*minion',mymap):
print "Element: %s, best path: %s" % (elem, get_shortest_path(elem,mymap))
-
+
print "- OK, now the whole thing:"
for elem in match_glob_in_tree('*',mymap):
print "Element: %s, best path: %s" % (elem, get_shortest_path(elem,mymap))
-
+
print "- And finally, with all duplicates removed:"
for elem in get_paths_for_glob('*',mymap):
print "Valid Path: %s" % elem
-
+
print "- And grouped together:"
print group_paths(get_paths_for_glob('*',mymap))
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/func_command.py
^
|
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
## func command line interface & client lib
##
## Copyright 2007,2008 Red Hat, Inc
@@ -25,20 +23,29 @@
name = "func"
usage = "func [--options] \"hostname glob\" module method [arg1] [arg2] ... "
+ socket_timeout = None
subCommandClasses = []
+ exclude_spec = None
+ conffile = None
def __init__(self):
modules = module_loader.load_modules('func/overlord/cmd_modules/', base_command.BaseCommand)
for x in modules.keys():
- self.subCommandClasses.append(modules[x].__class__)
- command.Command.__init__(self)
+ self.subCommandClasses.append(modules[x].__class__)
+ command.Command.__init__(self, parentCommand=FuncCommandLine)
def do(self, args):
pass
def addOptions(self):
- self.parser.add_option('', '--version', action="store_true",
- help="show version information")
+ self.parser.add_option('-t', '--timeout', dest="timeout", type="float",
+ help="Set default socket timeout in seconds")
+ self.parser.add_option('-e', '--exclude', dest="exclude",
+ help="exclude some of minions",
+ action="store",
+ type="string")
+ self.parser.add_option('-c', '--conf', dest="conffile",
+ help="specify an overlord.conf file for func to use")
# just some ugly goo to try to guess if arg[1] is hostnamegoo or
# a command name
@@ -46,7 +53,7 @@
if str.find("*") or str.find("?") or str.find("[") or str.find("]"):
return True
return False
-
+
def handleArguments(self, args):
if len(args) < 2:
sys.stderr.write("see the func manpage for usage\n")
@@ -62,6 +69,11 @@
# maybe a class variable self.data on Command?
def handleOptions(self, options):
- if options.version:
- #FIXME
- sys.stderr.write("version is NOT IMPLEMENTED YET\n")
+ if options.timeout:
+ self.socket_timeout = options.timeout
+
+ if options.exclude:
+ self.exclude_spec = options.exclude
+
+ if options.conffile:
+ self.conffile = options.conffile
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/group
^
|
+(directory)
|
|
Changed |
func-0.28.tar.bz2/func/overlord/group/__init__.py
^
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/group/base.py
^
|
@@ -0,0 +1,72 @@
+class BaseBackend(object):
+ """
+ A base class for others that will
+ implement a backend for groups api
+ """
+
+ def add_host_to_group(self,group,host,save=True):
+ """
+ Adds a host to a group
+ """
+ raise NotImplementedError
+
+
+ def add_group(self,group,save=True):
+ """
+ Adds a group
+ """
+ raise NotImplementedError
+
+ def remove_group(self,group,save=True):
+ """
+ Removes a group
+ """
+ raise NotImplementedError
+
+ def remove_host(self,group,host,save=True):
+ """
+ Remove a host from groups
+ """
+ raise NotImplementedError
+
+
+ def save_changes(self):
+ """
+ Commit the changes that are pending in memory
+ """
+ raise NotImplementedError
+
+ def get_groups(self,pattern=None,exact=True,exclude=None):
+ """
+ Get a set of groups
+ """
+ raise NotImplementedError
+
+ def get_hosts(self,pattern=None,group=None,exact=True,exclude=NotImplementedError):
+
+ """
+ Get a set of hosts
+ """
+ raise NotImplementedError
+
+from func.commonconfig import OVERLORD_CONFIG_FILE,OverlordConfig
+from certmaster.config import read_config
+CONF_FILE = OVERLORD_CONFIG_FILE
+
+def choose_backend(backend=None,conf_file=None,db_file=None):
+ """
+ Chooses a backend according to the supplied params,
+ falling back to the overlord config ...
+ """
+
+ config = read_config(CONF_FILE,OverlordConfig)
+ backend = backend or config.backend or "conf"
+
+ if backend == "sqlite":
+ from func.overlord.group.sqlite_backend import SqliteBackend
+ return SqliteBackend(db_file=db_file)
+ elif backend == "conf":
+ from func.overlord.group.conf_backend import ConfBackend
+ return ConfBackend(conf_file=conf_file)
+ else:
+ raise Exception("No valid backend options supplied")
|
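A short sketch of picking a backend by hand instead of relying on the overlord config; the file path, group and host names are illustrative:

    # illustrative only -- the group and host names are invented
    from func.overlord.group.base import choose_backend

    # "conf" keeps groups in an ini-style file; "sqlite" requires SQLAlchemy
    backend = choose_backend(backend="conf", conf_file="/etc/func/groups")
    backend.add_group("webservers")
    backend.add_host_to_group("webservers", "www1.example.org")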
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/group/conf_backend.py
^
|
@@ -0,0 +1,264 @@
+##
+## Copyright 2007,2008 Red Hat, Inc
+## Adrian Likins <alikins@redhat.com>
+## +AUTHORS
+##
+## This software may be freely redistributed under the terms of the GNU
+## general public license.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+##
+
+
+# this module lets you define groups of systems to work with from the
+# commandline. It uses an "ini" style config parser like:
+
+#[groupname]
+#host = foobar, baz, blip
+#subgroup = blippy; flozzy
+
+# Subgrouping is supported only one level down, but group can have both
+# hosts and many subgroups with other hosts
+
+import ConfigParser
+import sys
+GROUP_FILE = "/etc/func/groups"
+
+
+from func.overlord.group.base import BaseBackend
+
+class ConfBackend(BaseBackend):
+ """
+ This backend keeps the group definitions
+ in an ini-style configuration file
+ """
+ def __init__(self,conf_file = None,*args,**kwargs):
+ """
+ Parse the group definitions from the configuration file;
+ it is created on the first save if it does not exist yet
+
+ @param conf_file : Configuration file
+ """
+ self.config = conf_file or GROUP_FILE
+ self.__groups = {}
+ self.__parse()
+
+
+
+ def __parse(self):
+
+ self.cp = ConfigParser.SafeConfigParser()
+ self.cp.read(self.config)
+
+ #loop through the group_names
+ for section in self.cp.sections():
+ options = self.cp.options(section)
+ for option in options:
+ if option == "host":
+ hosts = self.cp.get(section,option)
+ hosts = hosts.replace(';',',')
+ hosts = hosts.split(",")
+ for h in hosts:
+ h = h.strip()
+ self.add_host_to_group(section,h,save=False)
+
+ def add_host_to_group(self,group,host,save=True):
+ """
+ Adds a host to a group
+ """
+ host = host.lower()
+ group = group.lower()
+
+ if not self.__groups.has_key(group):
+ self.__groups[group] = []
+
+ #dont want duplicates
+ if not host in self.__groups[group]:
+ if host:
+ self.__groups[group].append(host)
+ else:
+ return (False,"Host is already in database : %s"%host)
+
+ if save:
+ self.save_changes()
+ return (True,"")
+
+ def add_group(self,group,save=True):
+ """
+ Adds a group
+ """
+ group = group.lower()
+ if self.__groups.has_key(group):
+ return (False,"Group name : %s already exists"%group)
+ #create with an empty list
+ self.__groups[group] = []
+ if save:
+ self.save_changes()
+
+ return (True,'') #success
+
+ def remove_group(self,group,save=True):
+ """
+ Removes a group
+ """
+ if not self.__groups.has_key(group):
+ return (False,"Group name : %s doesnt exist"%group)
+ #delete that entry
+ if group in self.cp.sections():
+ #if we have it also here should remove it
+ if self.cp.has_section(group):
+ self.cp.remove_section(group)
+ #delete the entry
+ del self.__groups[group]
+
+ #Do you want to store it ?
+ if save:
+ self.save_changes()
+ return (True,'')
+
+ def remove_host(self,group,host,save=True):
+ """
+ Remove a host from groups
+ """
+ host = host.lower()
+ group = group.lower()
+
+ if not self.__groups.has_key(group) or not host in self.__groups[group]:
+ return (False,"Non existing group or name")
+
+ #remove the machine from there
+ self.__groups[group].remove(host)
+ #save to config file
+ if save:
+ self.save_changes()
+
+ return (True,'')
+
+
+ def save_changes(self):
+ """
+ Write changes to disk
+ """
+ for group_name,group_hosts in self.__groups.iteritems():
+ #if we have added a new group add it to config object
+ if not group_name in self.cp.sections():
+ self.cp.add_section(group_name)
+ self.cp.set(group_name,"host",",".join(group_hosts))
+
+ #store the changes
+ conf_file = open(self.config, "w")
+ self.cp.write(conf_file)
+
+
+ def get_groups(self,pattern=None,exact=True,exclude=None):
+ """
+ Get a list of groups
+
+ @param pattern : Request either an exact group name or
+ a pattern to match against
+ @param exact : Whether pattern must match exactly or
+ just be contained in the name
+ @param exclude : A list to be excluded from final set
+
+ """
+
+ if not pattern:
+ #return all of them
+ if not exclude:
+ return self.__groups.keys()
+ else:
+ #get the difference of 2 sets
+ return list(set(self.__groups.keys()).difference(set(exclude)))
+ else:
+ #it seems there is a pattern
+ if exact:
+ #no point in checking the exclude
+ #list here ...
+ for g in self.__groups.keys():
+ if g == pattern.lower():
+ return [g]
+ return []
+
+ else:#not exact match
+ if not exclude:#there is no list to exclude
+ tmp_l = set()
+ for g in self.__groups.keys():
+ if pattern.lower() in g.lower():
+ tmp_l.add(g)
+ return list(tmp_l)
+ else:
+ tmp_l = set()
+ for g in self.__groups.keys():
+ if pattern.lower() in g:
+ tmp_l.add(g)
+ #get the difference of 2 sets
+ return list(tmp_l.difference(set(exclude)))
+
+ #should not actually be reached
+ return []
+
+
+ def get_hosts(self,pattern=None,group=None,exact=True,exclude=None):
+
+ """
+ Get a set of hosts
+
+ @param pattern : Request either an exact host name or
+ a pattern to match against
+ @param exact : Whether pattern must match exactly or
+ just be contained in the name
+ @param exclude : A list to be excluded from final set
+ """
+ #print "Caling %s:%s"%(pattern,group)
+ group = self.get_groups(pattern=group,exact=True)
+ #print "The group we got is : ",group
+ if not group or len(group)>1:
+ return []
+
+ hosts = self.__groups[group[0]]
+ #print "The hosts we got are ",hosts
+
+ if not pattern:
+ #return all of them
+ if not exclude:
+ #print "Returning back the hosts ",hosts
+ return hosts
+ else:
+ #get the difference of 2 sets
+ return list(set(hosts).difference(set(exclude)))
+ else:
+ #it seems there is a pattern
+ if exact:
+ #there is no mean to check here for exclude list ...
+ if type(pattern)==str:
+ for g in hosts:
+ if g.lower() == pattern.lower():
+ return [g]
+ else:
+ #sometimes we pass all list to compare em
+ tmp = []
+ for p in pattern:
+ if p.lower() in hosts:
+ tmp.append(p)
+ return tmp
+ return []
+
+ else:#not exact match
+ if not exclude:#there is no list to exclude
+ tmp_l = set()
+ for g in hosts:
+ if pattern.lower() in g.lower():
+ tmp_l.add(g)
+ return list(tmp_l)
+ else:
+ tmp_l = set()
+ for g in hosts:
+ if pattern.lower() in g.lower():
+ tmp_l.add(g)
+ #get the difference of 2 sets
+ return list(tmp_l.difference(set(exclude)))
+
+ #shouldnt come here actually
+ return []
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/group/sqlite_backend.py
^
|
@@ -0,0 +1,243 @@
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import ForeignKey,Column,Integer,String
+from sqlalchemy.orm import relation, backref
+from sqlalchemy.orm import scoped_session
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy import not_
+
+Base = declarative_base()
+class Group(Base):
+ """
+ Group Table
+ """
+ __tablename__ = 'groups'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(100),nullable=False,unique=True)
+
+ def __init__(self,name):
+ self.name = name
+
+ def __repr__(self):
+ return "<Group('%s')>" % (self.name)
+
+class Host(Base):
+ """
+ Hosts Table
+ """
+
+ __tablename__ = 'hosts'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(100), nullable=False,unique=True)
+ group_id = Column(Integer, ForeignKey('groups.id'))
+
+ group = relation(Group, backref=backref('hosts', order_by=id))
+
+ def __init__(self, name,group_id):
+ self.name = name
+ self.group_id = group_id
+
+ def __repr__(self):
+ return "<Host('%s')>" % self.name
+
+
+from func.commonconfig import OVERLORD_CONFIG_FILE,OverlordConfig
+CONF_FILE = OVERLORD_CONFIG_FILE
+DB_PATH = "/var/lib/certmaster/groups.db"
+from certmaster.config import read_config
+from func.overlord.group.base import BaseBackend
+import os
+
+class SqliteBackend(BaseBackend):
+ """
+ Sqlite backend for groups api
+ """
+
+ def __init__(self,conf_file = None,db_file=None,*args,**kwargs):
+ """
+ Initializing the database; if it does not exist it is created,
+ and a session is opened for querying
+
+ @param conf_file : Configuration file
+ @param db_file : Path of the database file (override if needed)
+ """
+ self.config = conf_file or CONF_FILE
+ self.config = read_config(self.config,OverlordConfig)
+ self.db_path = db_file or self.config.group_db or DB_PATH
+
+ self._recreate_session()
+
+ def _recreate_session(self):
+ if os.path.exists(self.db_path):
+ #we have it so dont have to create the databases
+ engine = create_engine('sqlite:///%s'%self.db_path)
+ else:
+ engine = create_engine('sqlite:///%s'%self.db_path)
+ Base.metadata.create_all(engine)
+
+ #create a session for querying
+ Session = scoped_session(sessionmaker(bind=engine))
+ self.session = Session()
+
+
+ def add_group(self,group,save=True):
+ """
+ Adds a group
+ """
+ #check for group first
+ gr = self._group_exists(group)
+ if gr[0]:
+ return (False,"Group already exists %s "%(gr[1]))
+
+ #add the group
+ self.session.add(Group(group))
+ self._check_commit(save)
+ return (True,'')
+
+ def add_host_to_group(self,group,host,save=True):
+ """
+ Adds a host to a group
+ """
+ try:
+ self.session.add(Host(host,self.session.query(Group).filter_by(name=group).one().id))
+ self._check_commit(save)
+ except Exception,e:
+ self._recreate_session()
+ return (False,"The host is already in database %s : %s "%(host,e))
+
+ return (True,'')
+
+ def remove_group(self,group,save=True):
+ """
+ Removes a group
+ """
+ #check for group first
+ group = self._group_exists(group)
+ if not group[0]:
+ return group
+ else:
+ group = group[1]
+
+ self.session.delete(group)
+ self._check_commit(save)
+ return (True,'')
+
+
+ def remove_host(self,group,host,save=True):
+ """
+ Remove a host from groups
+ """
+ #check for group first
+ group = self._group_exists(group)
+ if not group[0]:
+ return group
+ else:
+ group = group[1]
+ #check that the host exists in this group
+ host_db = None
+ try:
+ host_db=self.session.query(Host).filter_by(name=host,group_id=group.id).one()
+ except Exception,e:
+ #host not found in this group
+ return (False,str(e))
+
+ self.session.delete(host_db)
+ self._check_commit(save)
+ return (True,"")
+
+ def save_changes(self):
+ """
+ Commit the changes that are pending in memory
+ """
+ self._check_commit()
+
+
+ def get_groups(self,pattern=None,exact=True,exclude=None):
+ """
+ Get a set of groups
+
+ @param pattern : Request either an exact group name or
+ a pattern to match against
+ @param exact : Whether pattern must match exactly or
+ just be contained in the name
+ @param exclude : A list to be excluded from final set
+ """
+ if not pattern:
+ #that means we want all of them
+ if not exclude:
+ return [g.name for g in self.session.query(Group).all()]
+ else:
+ return [g.name for g in self.session.query(Group).filter(not_(Group.name.in_(exclude))).all()]
+
+ else:
+ if not exact:
+ if not exclude:
+ return [g.name for g in self.session.query(Group).filter(Group.name.like("".join(["%",pattern,"%"]))).all()]
+ else:
+ return [g.name for g in self.session.query(Group).filter(Group.name.like("".join(["%",pattern,"%"]))).filter(not_(Group.name.in_(exclude))).all()]
+
+ else:
+ return [g.name for g in self.session.query(Group).filter_by(name=pattern).all()]
+
+ return []
+
+ def get_hosts(self,pattern=None,group=None,exact=True,exclude=None):
+ """
+ Get a set of hosts
+
+ @param pattern : Request either an exact host name or
+ a pattern to match against
+ @param exact : Whether pattern must match exactly or
+ just be contained in the name
+ @param exclude : A list to be excluded from final set
+ """
+ group = self._group_exists(group)
+ if not group[0]:
+ return []
+ else:
+ group = group[1]
+
+ if not pattern:
+ #if there is no pattern there are 2 possible options
+ if not exclude:
+ return [h.name for h in self.session.query(Host).filter_by(group_id=group.id).all()]
+ else:
+ return [h.name for h in self.session.query(Host).filter_by(group_id=group.id).filter(not_(Host.name.in_(exclude))).all()]
+
+ else:
+ #there is some pattern so we should go for it
+ if exact:
+ if type(pattern)==list or type(pattern)==set:
+ #it seems we got a list to pull from database
+ return [h.name for h in self.session.query(Host).filter_by(group_id=group.id).filter(Host.name.in_(pattern)).all()]
+ else:
+ return [h.name for h in self.session.query(Host).filter_by(name=pattern,group_id=group.id).all()]
+
+ else:
+ if not exclude:
+ return [h.name for h in self.session.query(Host).filter(Host.name.like("".join(["%",pattern,"%"]))).filter_by(group_id=group.id).all()]
+ else:
+ return [h.name for h in self.session.query(Host).filter(Host.name.like("".join(["%",pattern,"%"]))).filter_by(group_id=group.id).filter(not_(Host.name.in_(exclude))).all()]
+
+ return []
+ def _check_commit(self,commit=True):
+ """
+ A simple util that checks if we should commit
+ """
+ if commit:
+ self.session.commit()
+
+ def _group_exists(self,group):
+ """
+ Checks if a group already exists
+ """
+ try:
+ group=self.session.query(Group).filter_by(name=group).all()
+ if group and len(group)==1:
+ return (True,group[0])
+ else:
+ return (False,"Not existing group name")
+ except Exception,e:
+ return (False,str(e))
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/groups.py
^
|
@@ -1,252 +1,327 @@
-#!/usr/bin/python
-
-## func command line interface & client lib
-##
-## Copyright 2007,2008 Red Hat, Inc
-## Adrian Likins <alikins@redhat.com>
-## +AUTHORS
-##
-## This software may be freely redistributed under the terms of the GNU
-## general public license.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-##
-
-
-# this module lets you define groups of systems to work with from the
-# commandline. It uses an "ini" style config parser like:
-
-#[groupname]
-#host = foobar, baz, blip
-#subgroup = blippy
+from func.overlord.group.base import choose_backend
+import sys
+import fnmatch
+def get_hosts_spec(spec):
+ """
+ A simple call to Minions class to be
+ able to use globbing in groups when
+ querying ...
+ """
+ from func.overlord.client import Minions
-import ConfigParser
-import sys
-GROUP_FILE = "/etc/func/groups"
+ m = Minions(spec)
+ return m.get_hosts_for_spec(spec)
class Groups(object):
- def __init__(self, filename=None):
+ def __init__(self,*args,**kwargs):
"""
- Get the file into the memory
+ Initialize the backend you are going to use
"""
- if filename:
- self.__filename = filename
+ #initialize here the backend
+ if 'get_hosts_for_spec' in kwargs:
+ self.get_hosts_for_spec = kwargs['get_hosts_for_spec']
+ del kwargs['get_hosts_for_spec']
else:
- self.__filename = GROUP_FILE
+ # fallback/legacy only - won't work for puppet or other minion types
+ self.get_hosts_for_spec = get_hosts_spec
+
+ self.backend = choose_backend(**kwargs)
- self.__groups = {}
- self.__parse()
- def __parse(self):
-
- self.cp = ConfigParser.SafeConfigParser()
- self.cp.read(self.__filename)
-
- #loop through the group_names
- for section in self.cp.sections():
- options = self.cp.options(section)
- for option in options:
- if option == "host":
- self.add_hosts_to_group(section, self.cp.get(section, option))
- #not implemented yet do we need it ?
- if option == "subgroup":
- continue
-
-
def show(self):
- print self.cp.sections()
- print self.__groups
+ """
+ Prints some info about current structure
+ """
+ groups = self.get_groups()
+ for g in groups:
+ print "Group : %s"%g
+ hosts = self.get_hosts(group=g)
+ if hosts:
+ for h in hosts:
+ print "\t Host : %s "%h
+
+
+
- def __parse_hoststrings(self, hoststring):
+ def __parse_strings(self, hoststring):
+ """
+ The host string may come in two forms: comma separated
+ (as written in the configuration file) or ';' separated
+ (as entered on the command line), so both are handled
+
+ @param hoststring : String to be parsed
+ """
hosts = []
- #the host string maybe in 2 forms
- #the first one is i it can be comma separated into the
- #configuration file
- #the second one is it can be ; separated and entered from
- #commandline so should consider both situations
+
if hoststring.find(';') != -1:
bits = hoststring.split(';')
elif hoststring.find(',') != -1:
bits = hoststring.split(',')
else:
- #sometimes we have only one entry there so that will be a problem if dont have
+ #sometimes we have only one entry there so that will be a problem if dont have
#a control for it will be missed otherwise :)
if len(hoststring)!=0:
hosts.append(hoststring)
return hosts
- #now append the god ones
- for bit in bits:
- bit = bit.strip().split(' ')
- for host in bit:
- if host not in hosts:
- hosts.append(host.strip())
+ return list(set(bits))
- return hosts
- def add_hosts_to_group(self, group, hoststring,save = False):
+ def add_group(self,group_name,save=True):
"""
- Here you can add more than one hosts to a given group
+ Adding a new group
+
+ @param group_name : Group to be added
+ @param save : Save now or keep in memory and save later
"""
- hosts = self.__parse_hoststrings(hoststring)
- #the user may left the host = empty at the beginning
- if not hosts:
- self.__groups[group] = []
- return
+ return self.backend.add_group(group_name,save)
+
+ def add_hosts_to_group_glob(self,group,hoststring,exclude_string=None):
+ """
+        With this method you can add lots of machines with a single
+        glob string ...
+
+        @param group : Group that the hosts will be added to
+        @param hoststring : Glob string of hosts to add; something
+                            like "www*" is quick and easy
+        @param exclude_string : Glob string of hosts to exclude,
+                            in the same "www*" style
+ """
+ hoststring = self.get_hosts_for_spec(hoststring)
+ if exclude_string :
+ e_s = self.get_hosts_for_spec(exclude_string)
+ hoststring = hoststring.difference(e_s)
+
+ #add them to backend
+ self.add_host_list(group,list(hoststring))
+
+ def add_hosts_to_group(self, group, hoststring):
+ """
+        Here you can add more than one host to a given group
+
+        @param group : Group that the hosts will be added to
+        @param hoststring : A string in the form "host1;host2" or a comma
+                            separated one (it will be parsed) ...
+
+ """
+ hosts = self.__parse_strings(hoststring)
for host in hosts:
- self.add_host_to_group(group, host)
+ self.add_host_to_group(group, host,save=False)
+ self.save_changes()
- def add_host_to_group(self, group, host,save = False):
+ def add_host_to_group(self, group, host, save =True):
"""
Add a single host to group
+
+        @param group : Group that the host will be added to
+ @param save : Save now or keep in memory and save later
+ @param host : Host to be added
"""
- if not self.__groups.has_key(group):
- self.__groups[group] = []
-
- #dont want duplicates
- if not host in self.__groups[group]:
- self.__groups[group].append(host)
+ return self.backend.add_host_to_group(group,host,save)
- def add_host_list(self,group,host_list,save = False):
+ def add_host_list(self,group,host_list):
"""
        Similar to the other add methods but accepts a list of hosts
instead of some strings
+
+        @param group : Group that the hosts will be added to
+ @param host_list : Host list
+
"""
- if type(host_list) != list:
- sys.stderr.write("We accept only lists for for add_host_list method")
+ if type(host_list) != list and type(host_list)!=set:
+ sys.stderr.write("We accept only lists for for add_host_list method we got %s : %s "%(host_list,type(host_list)))
return
for host in host_list:
- self.add_host_to_group(group,host)
+ self.add_host_to_group(group, host)
+
+ self.save_changes()
+
+
+ def get_groups(self,pattern=None,exact=True,exclude=None):
+ """
+        Get a list of groups according to the args
+
+        @param pattern : A string to match against group names
+        @param exact : When True the pattern match is exact,
+                       otherwise related matches are also returned
+        @param exclude : A list of groups to exclude, useful in globbing
+ """
+ return self.backend.get_groups(pattern,exact,exclude)
- if save:
- self.save_changes()
+ def get_groups_glob(self,group_string,exclude_string=None):
+ """
+ Get groups via glob strings
+        @param group_string : Glob of the groups we want to pull
+        @param exclude_string : Glob of the groups we don't want
+ """
+ all_groups = self.get_groups()
+ match_groups = fnmatch.filter(all_groups,group_string)
+
+ if exclude_string:
+ exclude_groups = fnmatch.filter(all_groups,exclude_string)
+ return list(set(match_groups).difference(set(exclude_groups)))
+ else:
+ return match_groups
- def get_groups(self):
+ def get_hosts(self,pattern=None,group=None,exact=True,exclude=None):
"""
- Simple getter
+        Get the list of hosts according to the args
+
+        @param pattern : A string to match against host names
+        @param group : Restrict the result to hosts in this group
+        @param exact : When True the pattern match is exact,
+                       otherwise related matches are also returned
+        @param exclude : A list of hosts to exclude, useful in globbing
"""
- return self.__groups
+
+ return self.backend.get_hosts(pattern,group,exact,exclude)
def get_group_names(self):
"""
Getting the groups names
+ HERE ONLY FOR API COMPATIBILITY
+        use get_groups() instead of this one
"""
- return self.__groups.keys()
+ return self.get_groups()
- def get_hosts_by_group_glob(self, group_glob_str):
+
+ def _get_host_list_from_glob(self,group_globs,include_host):
"""
- What is that one ?
+ A private util method that is responsible for
+ extracting a list of hosts from a glob str
"""
- #split it if we have more than one
- group_gloobs = group_glob_str.split(';')
- hosts = []
- for group_gloob in group_gloobs:
- #the group globs are signed by @
- if not group_gloob[0] == "@":
+ for group_glob in group_globs:
+ if group_glob[0] != "@":
continue
- if self.__groups.has_key(group_gloob[1:]):
- hosts.extend(self.__groups[group_gloob[1:]])
- else:
- sys.stderr.write("group %s not defined\n" % group_gloob)
- #get the hosts
- return hosts
+ group_glob = group_glob[1:]
+            #we look for the "@group:ww*" form here
+ if group_glob.find(":")!=-1:
+ group_str,host_str = group_glob.split(":")
+ hosts = self.get_hosts_for_spec(host_str)
+ #print "The hosts are ",hosts
+ include_host=include_host.union(set(self.get_hosts(pattern=hosts,group=group_str,exact=True)))
+ else:
+ for host_str in self.get_hosts(group=group_glob):
+ include_host = include_host.union(set(self.get_hosts_for_spec(host_str)))
+
+ return include_host
+
+ def get_hosts_glob(self,host_string,exclude_string=None):
+ """
+ Get hosts via globbing
+
+        @param host_string : The string that selects the hosts we
+                             want. Example: @grname:ww*;@gr2
+        @param exclude_string : The string that selects the hosts we
+                             don't want. Example: @grname:ww*;@gr2
+ """
+
+ group_globs = host_string.split(';')
+ include_host = set()
+ include_host = self._get_host_list_from_glob(group_globs,include_host)
+
+ #if you have a list to exclude
+ if exclude_string:
+ exclude_globs = exclude_string.split(';')
+ exclude_host = set()
+ exclude_host = self._get_host_list_from_glob(exclude_globs,exclude_host)
+ return list(include_host.difference(exclude_host))
+ else:
+ return list(include_host)
- def save_changes(self):
+
+ def get_hosts_by_group_glob(self, group_glob_str):
"""
- Write changes to disk
+ Here only for API COMPATIBILITY ...
+        use the more advanced get_hosts_glob() method instead
"""
- for group_name,group_hosts in self.__groups.iteritems():
- #if we have added a new group add it to config object
- if not group_name in self.cp.sections():
- self.cp.add_section(group_name)
- self.cp.set(group_name,"host",",".join(group_hosts))
- #print "Im in save changes and here i have : ",self.cp.get(group_name,"host")
-
- #store tha changes
- conf_file = open(self.__filename, "w")
- self.cp.write(conf_file)
- conf_file.close()
-
+ return self.get_hosts_glob(group_glob_str)
- def remove_group(self,group_name,save=False):
+ def remove_group(self,group,save=True):
"""
Removing a group if needed
+
+ @param group : Group to be removed
+ @param save : Save now or keep in memory and save later
+ """
+
+ return self.backend.remove_group(group,save)
+
+ def remove_group_glob(self,group_str):
+ """
+        Remove groups matching a glob
+        """
+        #first get all available groups
+ all_groups = self.get_groups()
+ remove_groups = fnmatch.filter(all_groups,group_str)
+ #remove them
+ self.remove_group_list(remove_groups)
+
+ def remove_group_list(self,group_list):
"""
- if not self.__groups.has_key(group_name):
+        Removes a list of groups
+ """
+
+ if type(group_list) != list:
+ sys.stderr.write("We accept only lists for for remove_group_list method")
return False
- #delete that entry
- if group_name in self.cp.sections():
- #if we have it also here should remove it
- if self.cp.has_section(group_name):
- self.cp.remove_section(group_name)
- #delete the entry
- del self.__groups[group_name]
-
- #Do you want to store it ?
- if save:
- self.save_changes()
- return True
- def remove_host(self,group_name,host,save=False):
+ for g in group_list:
+ self.remove_group(g,save=False)
+ self.save_changes()
+
+
+ def remove_host(self,group_name,host,save=True):
"""
        Removes the given host from the conf file
"""
- if not self.__groups.has_key(group_name) or not host in self.__groups[group_name]:
- return False
-
- #remove the machine from there
- self.__groups[group_name].remove(host)
- #save to config file
- if save:
- self.save_changes()
+ return self.backend.remove_host(group_name,host,save)
+
- return True
+ def remove_host_glob(self,group_name,host_str,exclude_string=None):
+ copy_host_str = host_str
+ host_str = self.get_hosts_for_spec(host_str)
+ if exclude_string:
+ e_s = self.get_hosts_for_spec(exclude_string)
+ host_str = host_str.difference(e_s)
+
+ #remove the list completely
+        if not host_str: #sometimes there may be old entries in the db that
+                         #no longer match anything; handle that case too
+ self.remove_host_list(group_name,[copy_host_str])
+ else:
+ self.remove_host_list(group_name,host_str)
- def remove_host_list(self,group,host_list,save = False):
+ def remove_host_list(self,group,host_list):
"""
        Remove a whole list of hosts from the conf file
"""
- if type(host_list) != list:
- sys.stderr.write("We accept only lists for for add_host_list method")
+ if type(host_list) != list and type(host_list) != set :
+ sys.stderr.write("We accept only lists for for remove_host_list method")
return False
for host in host_list:
- self.remove_host(group,host,save = False)
-
- if save:
- self.save_changes()
+ self.remove_host(group,host,save=False)
+ self.save_changes()
-
- def add_group(self,group_name,save=False):
+ def save_changes(self):
"""
- Adding a new group
+ Write changes to disk
"""
- if self.__groups.has_key(group_name):
- return False
- #create with an empty list
- self.__groups[group_name] = []
- if save:
- self.save_changes()
-
- return True #success
-
+ self.backend.save_changes()
-def main():
- g = Groups("/tmp/testgroups")
- print g.show()
-
if __name__ == "__main__":
- main()
+ pass
|
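
A usage illustration for the reworked, backend-driven Groups API shown above. This is a minimal sketch only; the import path, the backend defaults, and the group/host names are assumptions for illustration, not taken from this diff:

    # sketch only: import path, backend defaults and names are illustrative assumptions
    from func.overlord.group.groups import Groups

    groups = Groups()                                      # backend picked by choose_backend()
    groups.add_group("webservers")                         # saved immediately (save=True default)
    groups.add_hosts_to_group("webservers", "www1;www2")   # ';' or ',' separated host string

    # glob selection: hosts in @webservers matching "www*", minus those matching "www9*"
    hosts = groups.get_hosts_glob("@webservers:www*", exclude_string="@webservers:www9*")
    print hosts

    groups.remove_host("webservers", "www2")               # persisted via the backend

The glob forms mirror what get_hosts_glob() parses: each ';'-separated element starts with '@' and may carry a ':host-glob' suffix.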
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/inventory.py
^
|
@@ -22,6 +22,7 @@
import xmlrpclib
from func.minion import sub_process
import func.overlord.client as func_client
+from func.overlord.command import FuncOptionParser
import func.utils as utils
DEFAULT_TREE = "/var/lib/func/inventory/"
@@ -30,11 +31,11 @@
class FuncInventory(object):
def __init__(self):
- pass
+ pass
- def run(self,args):
+ def run(self,args):
- p = optparse.OptionParser()
+ p = FuncOptionParser(version=True)
p.add_option("-v", "--verbose",
dest="verbose",
action="store_true",
@@ -77,7 +78,7 @@
# see what modules each host provides (as well as what hosts we have)
host_methods = func_client.Overlord(options.server_spec).system.list_methods()
-
+
# call all remote info methods and handle them
if options.verbose:
print "- scanning ..."
@@ -86,8 +87,8 @@
for (host, methods) in host_methods.iteritems():
if utils.is_error(methods):
- sys.stderr.write("-- connection refused: %s\n" % host)
- break
+ sys.stderr.write("-- connection refused: %s\n" % host)
+ continue
for each_method in methods:
@@ -105,7 +106,7 @@
if not "all" in filtered_function_list and not method_name in filtered_function_list:
continue
-
+
overlord = func_client.Overlord(host,noglobs=True) # ,noglobs=True)
results = getattr(getattr(overlord,module_name),method_name)()
if self.options.verbose:
@@ -136,12 +137,12 @@
return pprint.pformat(data)
- # FUTURE: skvidal points out that guest symlinking would be an interesting feature
+ # FUTURE: skvidal points out that guest symlinking would be an interesting feature
def save_results(self, options, host_name, module_name, method_name, results):
dirname = os.path.join(options.tree, host_name, module_name)
if not os.path.exists(dirname):
- os.makedirs(dirname)
+ os.makedirs(dirname)
filename = os.path.join(dirname, method_name)
results_file = open(filename,"w+")
data = self.format_return(results)
@@ -150,12 +151,12 @@
def git_setup(self,options):
if options.nogit:
- return
+ return
if not os.path.exists("/usr/bin/git"):
sys.stderr.write("git-core is not installed, so no change tracking is available.\n")
sys.stderr.write("use --no-git or, better, just install it.\n")
- sys.exit(411)
-
+ sys.exit(411)
+
if not os.path.exists(options.tree):
os.makedirs(options.tree)
dirname = os.path.join(options.tree, ".git")
@@ -176,7 +177,7 @@
return
else:
if options.verbose:
- print "- updating git"
+ print "- updating git"
mytime = time.asctime()
cwd = os.getcwd()
os.chdir(options.tree)
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/mapper.py
^
|
@@ -2,7 +2,7 @@
## func topology map-building tool
## If you've got a giant, tangled, complex web of func overlords
## and minions, this tool will help you construct or augment a map
-## of your func network topology so that delegating commands to
+## of your func network topology so that delegating commands to
## minions and overlords becomes a simple matter.
##
## Copyright 2008, Red Hat, Inc.
@@ -20,6 +20,7 @@
import sys
import func.yaml as yaml
import func.overlord.client as func_client
+from func.overlord.command import FuncOptionParser
from func import utils
@@ -29,10 +30,10 @@
def __init__(self):
pass
-
+
def run(self,args):
-
- p = optparse.OptionParser()
+
+ p = FuncOptionParser(version=True)
#currently not implemented
p.add_option("-a", "--append",
dest="append",
@@ -46,29 +47,29 @@
dest="verbose",
action="store_true",
help="provide extra output")
-
+
(options, args) = p.parse_args(args)
self.options = options
-
+
if options.verbose:
print "- recursively calling map function"
-
+
self.build_map()
-
+
return 1
-
+
def build_map(self):
-
+
minion_hash = func_client.Overlord("*").overlord.map_minions(self.options.only_alive==True)
-
+
for minion in minion_hash.keys(): #clean hash of any top-level errors
if utils.is_error(minion_hash[minion]):
- minion_hash[minion] = {}
+ minion_hash[minion] = {}
if self.options.verbose:
print "- built the following map:"
print minion_hash
-
+
if self.options.append:
try:
oldmap = file(DEFAULT_TREE, 'r').read()
@@ -77,23 +78,22 @@
except e:
sys.stderr.write("ERROR: old map could not be read, append failed\n")
sys.exit(-1)
-
+
merged_map = {}
merged_map.update(old_hash)
merged_map.update(minion_hash)
-
+
if self.options.verbose:
print "- appended new map to the following map:"
print old_hash
print " resulting in:"
print merged_map
-
+
minion_hash = merged_map
-
+
if self.options.verbose:
print "- writing to %s" % DEFAULT_TREE
-
+
mapfile = file(DEFAULT_TREE, 'w')
data = yaml.dump(minion_hash)
mapfile.write(data)
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/modules/copyfile.py
^
|
@@ -1,8 +1,11 @@
-from func.overlord import overlord_module
+
import os
import stat
+import sys
import xmlrpclib
+from func.overlord import overlord_module
+
class copyfile(overlord_module.BaseModule):
def send(self, localpath, remotepath, bufsize=60000):
try:
@@ -18,11 +21,11 @@
self.parent.run("copyfile", "open", [remotepath, mode, uid, gid])
- while True:
+ while True:
data=f.read(bufsize)
if data:
self.parent.run("copyfile", "append", [remotepath, xmlrpclib.Binary(data)])
else:
break
-
+
return True
|
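
A one-call sketch of the send() method above. It assumes overlord-side modules are reachable through the client's local attribute, and the file paths are placeholders rather than values from this diff:

    # sketch only: the 'local' access path and the file names are assumptions
    import func.overlord.client as fc

    client = fc.Client("*")                              # every registered minion
    client.local.copyfile.send("/etc/motd", "/etc/motd") # push a local file to all minions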
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/modules/getfile.py
^
|
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+#Copyright (C) 2010 Louis-Frederic Coilliot
+#
+#This program is free software: you can redistribute it and/or modify
+#it under the terms of the GNU General Public License version 3 as
+# published by the Free Software Foundation.
+#
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+#
+#You should have received a copy of the GNU General Public License
+#along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Get a file on the minions, chunk by chunk. Overlord side."""
+from func.overlord import overlord_module
+import os
+
+try:
+ # py 2.4+
+ import hashlib
+except ImportError:
+ # py 2.3 support for RHEL4
+ import sha
+ class hashlib:
+ @staticmethod
+ def new(algo):
+ if algo == 'sha1':
+ return sha.new()
+ raise ValueError, "Bad checksum type"
+try:
+ # py 2.4+
+ from base64 import b64decode
+except ImportError:
+ # py 2.3 support for RHEL4
+ from base64 import decodestring as b64decode
+
+class getfile(overlord_module.BaseModule):
+ """Get a file on the minions"""
+ def get(self, source, foldertarget):
+ """Get a file on the minions, chunk by chunk. Save the files locally"""
+ chunkslendict = self.parent.run("getfile", "chunkslen", [ source ])
+ # Exclude results for minions with a REMOTE_ERROR
+ chunkslens = [ clen for clen in chunkslendict.values()\
+ if (type(clen) == type(int())) ]
+ maxchunk = max(chunkslens)
+
+ if maxchunk == -1:
+ msg = 'Unable to open the file on the minion(s)'
+ status = 1
+ return status, msg
+
+ if not os.path.isdir(foldertarget):
+ try:
+ os.mkdir(foldertarget)
+ except OSError:
+ msg = 'Problem during the creation of the folder %s'\
+ % (foldertarget)
+ status = 1
+ return status, msg
+
+ if not os.access(foldertarget, os.W_OK):
+ msg = 'The folder %s is not writeable' % (foldertarget)
+ status = 1
+ return status, msg
+
+ nullsha = hashlib.new('sha1').hexdigest()
+ sourcebasename = os.path.basename(source)
+ excluderrlist = []
+
+ for chunknum in range(maxchunk):
+ currentchunks = self.parent.run("getfile", "getchunk",
+ [chunknum, source]).items()
+ for minion, chunkparams in currentchunks:
+ if minion in excluderrlist:
+ # previous error reported
+ continue
+ try:
+ checksum, chunk = chunkparams
+ except ValueError:
+ # Probably a REMOTE_ERROR
+ excluderrlist.append(minion)
+ continue
+ mysha = hashlib.new('sha1')
+ mysha.update(chunk)
+ if checksum == -1:
+ # On this minion there was no file to get
+ continue
+ if mysha.hexdigest() == nullsha:
+ # On this minion there is no more chunk to get
+ continue
+ minionfolder = foldertarget+'/'+minion
+ if mysha.hexdigest() == checksum:
+ if not os.path.isdir(minionfolder):
+ try:
+ os.mkdir(minionfolder)
+ except OSError:
+ excluderrlist.append(minion)
+ continue
+ if chunknum == 0:
+ fic = open(minionfolder+'/'+sourcebasename, 'w')
+ else:
+ fic = open(minionfolder+'/'+sourcebasename, 'a')
+ fic.write(b64decode(chunk))
+ fic.close()
+ else:
+ # Error - checksum failed during copy
+ # Delete the partial file
+ # Abort the copy for this minion
+ excluderrlist.append(minion)
+ os.remove(minionfolder+'/'+sourcebasename)
+ return 0, foldertarget
|
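
A sketch of driving the new getfile module above from overlord-side code; the 'local' access path and the file locations are assumptions, not facts from this diff:

    # sketch only: 'local' access path and file locations are assumptions
    import func.overlord.client as fc

    client = fc.Client("*")
    # pull /etc/hosts from every reachable minion; files land under
    # /tmp/gathered/<minion-name>/hosts, as the code above writes them
    status, where = client.local.getfile.get("/etc/hosts", "/tmp/gathered")
    if status != 0:
        print "getfile failed:", where
    else:
        print "results written under", where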
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/modules/netapp.py
^
|
@@ -45,10 +45,10 @@
return _(self.netapp.vol.destroy(self.filer, vol)[self.admin_host])
def offline_volume(self, vol):
- return _(self.netapp.vol.offline(self.filer, vol)[self.admin_host])
+ return _(self.netapp.vol.offline(self.filer, vol)[self.admin_host])
def online_volume(self, vol):
- return _(self.netapp.vol.online(self.filer, vol)[self.admin_host])
+ return _(self.netapp.vol.online(self.filer, vol)[self.admin_host])
def get_volume_size(self, vol):
return _(self.netapp.vol.size(self.filer, vol)[self.admin_host])
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/modules/utils.py
^
|
@@ -6,7 +6,7 @@
class utils(overlord_module.BaseModule):
def __diff_dicts(self, a, b):
return dict([(k, v) for k, v in a.iteritems() if k not in b])
-
+
def async_poll(self, job_id, partial_func=None, interval=0.5):
async_done = False
@@ -37,4 +37,4 @@
time.sleep(interval)
def list_minions(self):
- return self.parent.minions
+ return self.parent.minions
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/func/overlord/scripts.py
^
|
@@ -0,0 +1,52 @@
+# python modules for doing normal/standard things with func command scripts
+# parsing/checking for errors
+# returning hosts
+# returning results
+# standard option parser for --forks, --outputpath, --timeout, --hosts-from-file, --
+
+
+from optparse import OptionParser
+import sys
+
+
+def base_func_parser(opthosts=True, outputpath=True, forkdef=40, timeoutdef=300):
+ parser = OptionParser()
+ if opthosts:
+ parser.add_option('--host', default=[], action='append',
+ help="hosts to act on, defaults to ALL")
+ parser.add_option('--hosts-from-file', default=None, dest="hostfile",
+ help="read list of hosts from this file, if '-' read from stdin")
+
+ parser.add_option('--timeout', default=timeoutdef, type='int',
+ help='set the wait timeout for func commands')
+ parser.add_option('--forks', default=forkdef, type='int',
+ help='set the number of forks to start up')
+ parser.add_option('-d', '--delegate', default=None, action="store_true",
+ help="use delegation to make function call")
+ parser.add_option('--no-delegate', default=None, dest="delegate", action="store_false",
+ help="disable delegation when making function call")
+ if outputpath:
+ parser.add_option('--outputpath', default='/var/lib/func/data/', dest="outputpath",
+ help="basepath to store results/errors output.")
+ return parser
+
+def handle_base_func_options(parser, opts):
+ if hasattr(opts, 'hostfile') and opts.hostfile:
+ hosts = []
+ if opts.hostfile == '-':
+ hosts = sys.stdin.readlines()
+ else:
+ hosts = open(opts.hostfile, 'r').readlines()
+
+ for hn in hosts:
+ hn = hn.strip()
+ if hn.startswith('#'):
+ continue
+ hn = hn.replace('\n', '')
+ opts.host.append(hn)
+
+ return opts
+
+
+def errorprint(msg):
+ print >> sys.stderr, msg
|
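
The helper module above is what the new overlord scripts further down in this diff build on. A minimal sketch of a script using it, mirroring the pattern of func-down-hosts below:

    #!/usr/bin/python -tt
    # minimal sketch: mirrors the structure of the bundled scripts below
    import sys
    import func.overlord.client
    from func.overlord.scripts import base_func_parser, handle_base_func_options

    def main(args):
        parser = base_func_parser(outputpath=False, timeoutdef=10)
        opts, args = parser.parse_args(args)
        opts = handle_base_func_options(parser, opts)

        hosts = '*'
        if opts.host:
            hosts = ';'.join(opts.host)

        fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks)
        for (hn, out) in fc.test.ping().items():
            print '%s:%s' % (hn, out)
        return 0

    if __name__ == "__main__":
        sys.exit(main(sys.argv[1:]))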
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/overlord/sslclient.py
^
|
@@ -23,25 +23,31 @@
# Yay for Python 2.2
pass
_host, _port = urllib.splitport(host)
- return SSLCommon.HTTPS(_host, int(_port), ssl_context=self.ssl_ctx, timeout=self._timeout)
+ if hasattr(xmlrpclib.Transport, 'single_request'):
+ cnx_class = SSLCommon.HTTPSConnection
+ else:
+ cnx_class = SSLCommon.HTTPS
+ return cnx_class(_host, int(_port), ssl_context=self.ssl_ctx, timeout=self._timeout)
class SSLXMLRPCServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, pkey_file, cert_file, ca_cert_file, timeout=None):
self.ctx = SSLCommon.CreateSSLContext(pkey_file, cert_file, ca_cert_file)
- xmlrpclib.ServerProxy.__init__(self, uri, SSL_Transport(ssl_context=self.ctx, timeout=timeout))
+ xmlrpclib.ServerProxy.__init__(self, uri, SSL_Transport(ssl_context=self.ctx, timeout=timeout), allow_none=True)
class FuncServer(SSLXMLRPCServerProxy):
- def __init__(self, uri, pem=None, crt=None, ca=None):
+ def __init__(self, uri, pem=None, crt=None, ca=None, timeout=None):
self.pem = pem
self.crt = crt
self.ca = ca
+ self.timeout = timeout
SSLXMLRPCServerProxy.__init__(self, uri,
self.pem,
self.crt,
- self.ca)
+ self.ca,
+ self.timeout)
if __name__ == "__main__":
|
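
For reference, a short sketch of the new timeout pass-through in FuncServer; the URI and certificate paths below are placeholders, not values from this diff:

    # sketch only: URI and certificate paths are placeholders
    from func.overlord.sslclient import FuncServer

    server = FuncServer("https://minion1.example.org:51234",
                        pem="/etc/pki/certmaster/overlord.pem",
                        crt="/etc/pki/certmaster/overlord.cert",
                        ca="/etc/pki/certmaster/ca.cert",
                        timeout=30)   # now forwarded down to the SSL transport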
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/utils.py
^
|
@@ -12,7 +12,21 @@
import inspect
import os
+import pwd
+import socket
import string
+import sys
+import re
+import fnmatch
+import tempfile
+import glob
+from stat import *
+
+
+from certmaster.config import read_config
+from certmaster.commonconfig import MinionConfig
+from commonconfig import FuncdConfig
+
REMOTE_ERROR = "REMOTE_ERROR"
@@ -32,7 +46,7 @@
     This method will be used to clean some
     glob address expressions because async stuff
depends on that part
-
+
@param dirty_word : word to be cleaned
"""
from copy import copy
@@ -51,9 +65,399 @@
return job_id
def is_public_valid_method(obj, attr, blacklist=[]):
- if inspect.ismethod(getattr(obj, attr)) and attr[0] != '_':
- for b in blacklist:
- if attr==b:
- return False
+    # Note: the order can be important here. func modules that try to inspect
+    # the list of available methods may run into infinite recursion issues if
+    # getattr() is called on them. They can work around this by placing the
+    # problematic code in a private method that's called from their public
+    # method, as long as we perform the check for a leading underscore before
+    # the check that calls getattr()
+ if attr[0] != '_' and attr not in blacklist and \
+ inspect.ismethod(getattr(obj, attr)):
return True
return False
+
+def get_hostname_by_route():
+ """
+ "localhost" is a lame hostname to use for a key, so try to get
+ a more meaningful hostname. We do this by connecting to the certmaster
+ and seeing what interface/ip it uses to make that connection, and looking
+ up the hostname for that.
+ """
+ # FIXME: this code ignores http proxies (which granted, we don't
+    # support elsewhere either).
+
+ minion_config_file = '/etc/func/minion.conf'
+ minion_config = read_config(minion_config_file, FuncdConfig)
+
+ # don't bother guessing a hostname if they specify it in the config file
+ if minion_config.minion_name:
+ return minion_config.minion_name.lower()
+
+ # try to find the hostname attached to the ip of the interface that we use
+ # to talk to the certmaster
+ cm_config_file = '/etc/certmaster/minion.conf'
+ cm_config = read_config(cm_config_file, MinionConfig)
+
+ server = cm_config.certmaster
+ port = cm_config.certmaster_port
+
+ s = socket.socket()
+ s.settimeout(5)
+ s.connect_ex((server, port))
+ (intf, port) = s.getsockname()
+ s.close()
+
+ try:
+ return socket.gethostbyaddr(intf)[0]
+ except:
+ pass
+
+ # try to find the hostname of the ip we're listening on
+ if minion_config.listen_addr:
+ try:
+ return socket.gethostbyaddr(minion_config.listen_addr)[0]
+ except:
+ pass
+
+ # in an ideal world, this would return exactly what we want: the most meaningful hostname
+    # for a system, but that is often not the case
+ try:
+ hostname = socket.gethostname()
+ ip = socket.gethostbyname(hostname)
+ if ip != "127.0.0.1" and ip != "::1":
+ return hostname.lower()
+ except:
+ pass
+
+ # all else has failed to get a good hostname, so just return
+ # an ip address
+ return intf
+
+def find_files_by_hostname(hostglob, filepath, fileext=''):
+ """look for files in the given filepath with the given extension that
+ match our hostname, but case insensitively. This handles the
+ craziness that is dns names that have mixed case :("""
+
+ # this is a little like a case insensitive glob, except it's just one
+ # layer deep - not multiple layers
+
+ if fileext and fileext[0] != '.':
+ fileext = '.' + fileext
+ thisregex = fnmatch.translate('%s%s' % (hostglob, fileext))
+ recomp = re.compile(thisregex, re.I) # case insensitive match
+ files = []
+ for potfile in os.listdir(filepath):
+ if recomp.match(potfile):
+ files.append(potfile)
+
+ return [os.path.normpath(filepath + '/' + file) for file in files]
+
+
+def get_all_host_aliases(hostname):
+ try:
+ (fqdn, aliases, ips) = socket.gethostbyname_ex(hostname)
+ except socket.gaierror, e:
+ return [hostname]
+ else:
+ return [fqdn] + aliases
+
+def get_fresh_method_instance(function_ref):
+ """
+    This method is a bit of a workaround: it adds per-method
+    logging capabilities without breaking the current api.
+    When methods are executed during xmlrpc calls we have a
+    pool of references to module methods that the overlord
+    calls. If we want to pass those methods different logger
+    instances, so that there is a log call per job_id, we
+    should not keep calling the same method reference; we need
+    fresh ones. That is how we solve it, in a somewhat hacky
+    way ...
+ """
+
+    #CAUTION: HACKY IF STATEMENTS AROUND :)
+    # we don't want private methods and the system
+    # module around; we should change the system
+    # module though ....
+ if function_ref.__name__.startswith("_"):
+ return function_ref
+ else:
+ try:
+ fresh_instance = function_ref.im_self.__class__()
+ except Exception,e:
+ #something went wrong so we return the normal reference value
+ return function_ref
+ try:
+ return getattr(fresh_instance,function_ref.__name__)
+ except AttributeError:
+ return getattr(fresh_instance,function_ref._name_)
+
+def should_log(args):
+ if args and type(args[len(args)-1]) == dict and args[len(args)-1].has_key('__logger__') and args[len(args)-1]['__logger__'] == True:
+ return True
+ return False
+
+_re_compiled_glob_match = None
+def re_glob(s):
+ """ Tests if a string is a shell wildcard. """
+ # TODO/FIXME maybe consider checking if it is a stringsType before going on - otherwise
+ # returning None
+ global _re_compiled_glob_match
+ if _re_compiled_glob_match is None:
+ _re_compiled_glob_match = re.compile('[*?]|\[.+\]').search
+ return _re_compiled_glob_match(s)
+
+def getCacheDir(tmpdir='/var/tmp', reuse=True, prefix='func-'):
+ """return a path to a valid and safe cachedir - only used when not running
+ as root or when --tempcache is set"""
+
+ uid = os.geteuid()
+ try:
+ usertup = pwd.getpwuid(uid)
+ username = usertup[0]
+ except KeyError:
+ return None # if it returns None then, well, it's bollocksed
+
+ if reuse:
+ # check for /var/tmp/func-username-* -
+ prefix = '%s%s-' % (prefix, username)
+ dirpath = '%s/%s*' % (tmpdir, prefix)
+ cachedirs = sorted(glob.glob(dirpath))
+ for thisdir in cachedirs:
+ stats = os.lstat(thisdir)
+ if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid:
+ return thisdir
+
+ # make the dir (tempfile.mkdtemp())
+ cachedir = tempfile.mkdtemp(prefix=prefix, dir=tmpdir)
+ return cachedir
+
+
+#################### PROGRESS BAR ##################################
+# The code below can be used for progress bar purposes, as we do.
+# It is a combination of http://code.activestate.com/recipes/168639/ and
+# http://code.activestate.com/recipes/475116/ ; for usage recipes,
+# look at the places where we use it!
+
+
+class TerminalController:
+ """
+ A class that can be used to portably generate formatted output to
+ a terminal.
+
+ `TerminalController` defines a set of instance variables whose
+ values are initialized to the control sequence necessary to
+ perform a given action. These can be simply included in normal
+ output to the terminal:
+
+ >>> term = TerminalController()
+ >>> print 'This is '+term.GREEN+'green'+term.NORMAL
+
+    Alternatively, the `render()` method can be used, which replaces
+ '${action}' with the string required to perform 'action':
+
+ >>> term = TerminalController()
+ >>> print term.render('This is ${GREEN}green${NORMAL}')
+
+ If the terminal doesn't support a given action, then the value of
+ the corresponding instance variable will be set to ''. As a
+ result, the above code will still work on terminals that do not
+ support color, except that their output will not be colored.
+ Also, this means that you can test whether the terminal supports a
+ given action by simply testing the truth value of the
+ corresponding instance variable:
+
+ >>> term = TerminalController()
+ >>> if term.CLEAR_SCREEN:
+    ...     print 'This terminal supports clearing the screen.'
+
+ Finally, if the width and height of the terminal are known, then
+ they will be stored in the `COLS` and `LINES` attributes.
+ """
+ # Cursor movement:
+ BOL = '' #: Move the cursor to the beginning of the line
+ UP = '' #: Move the cursor up one line
+ DOWN = '' #: Move the cursor down one line
+ LEFT = '' #: Move the cursor left one char
+ RIGHT = '' #: Move the cursor right one char
+
+ # Deletion:
+ CLEAR_SCREEN = '' #: Clear the screen and move to home position
+ CLEAR_EOL = '' #: Clear to the end of the line.
+ CLEAR_BOL = '' #: Clear to the beginning of the line.
+ CLEAR_EOS = '' #: Clear to the end of the screen
+
+ # Output modes:
+ BOLD = '' #: Turn on bold mode
+ BLINK = '' #: Turn on blink mode
+ DIM = '' #: Turn on half-bright mode
+ REVERSE = '' #: Turn on reverse-video mode
+ NORMAL = '' #: Turn off all modes
+
+ # Cursor display:
+ HIDE_CURSOR = '' #: Make the cursor invisible
+ SHOW_CURSOR = '' #: Make the cursor visible
+
+ # Terminal size:
+ COLS = None #: Width of the terminal (None for unknown)
+ LINES = None #: Height of the terminal (None for unknown)
+
+ # Foreground colors:
+ BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
+
+ # Background colors:
+ BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
+ BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
+
+ _STRING_CAPABILITIES = """
+ BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
+ CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
+ BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
+ HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
+ _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
+ _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
+
+ def __init__(self, term_stream=sys.stdout):
+ """
+ Create a `TerminalController` and initialize its attributes
+ with appropriate values for the current terminal.
+ `term_stream` is the stream that will be used for terminal
+ output; if this stream is not a tty, then the terminal is
+ assumed to be a dumb terminal (i.e., have no capabilities).
+ """
+ # Curses isn't available on all platforms
+ try: import curses
+ except: return
+
+ # If the stream isn't a tty, then assume it has no capabilities.
+ if not term_stream.isatty(): return
+
+ # Check the terminal type. If we fail, then assume that the
+ # terminal has no capabilities.
+ try: curses.setupterm()
+ except: return
+
+ # Look up numeric capabilities.
+ self.COLS = curses.tigetnum('cols')
+ self.LINES = curses.tigetnum('lines')
+
+ # Look up string capabilities.
+ for capability in self._STRING_CAPABILITIES:
+ (attrib, cap_name) = capability.split('=')
+ setattr(self, attrib, self._tigetstr(cap_name) or '')
+
+ # Colors
+ set_fg = self._tigetstr('setf')
+ if set_fg:
+ for i,color in zip(range(len(self._COLORS)), self._COLORS):
+ setattr(self, color, curses.tparm(set_fg, i) or '')
+ set_fg_ansi = self._tigetstr('setaf')
+ if set_fg_ansi:
+ for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+ setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
+ set_bg = self._tigetstr('setb')
+ if set_bg:
+ for i,color in zip(range(len(self._COLORS)), self._COLORS):
+ setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
+ set_bg_ansi = self._tigetstr('setab')
+ if set_bg_ansi:
+ for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+ setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')
+
+ def _tigetstr(self, cap_name):
+ # String capabilities can include "delays" of the form "$<2>".
+ # For any modern terminal, we should be able to just ignore
+ # these, so strip them out.
+ import curses
+ cap = curses.tigetstr(cap_name) or ''
+ return re.sub(r'\$<\d+>[/*]?', '', cap)
+
+ def render(self, template):
+ """
+        Replace each $-substitution in the given template string with
+ the corresponding terminal control string (if it's defined) or
+ '' (if it's not).
+ """
+ return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
+
+ def _render_sub(self, match):
+ s = match.group()
+ if s == '$$': return s
+ else: return getattr(self, s[2:-1])
+
+#######################################################################
+# Example use case: progress bar
+#######################################################################
+
+class ProgressBar:
+ """
+ A 3-line progress bar, which looks like::
+
+ Header
+ 20% [===========----------------------------------]
+ progress message
+
+ The progress bar is colored, if the terminal supports color
+ output; and adjusts to the width of the terminal.
+ """
+ BAR = '%3d%% ${WHITE}[${BLUE}%s%s${NORMAL}${WHITE}]${NORMAL}\n'
+ HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
+
+ def __init__(self, term, header,minValue = 0, maxValue = 10):
+ self.term = term
+ if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
+ raise ValueError("Terminal isn't capable enough -- you "
+ "should use a simpler progress dispaly.")
+ self.width = self.term.COLS or 75
+ self.bar = term.render(self.BAR)
+ self.header = self.term.render(self.HEADER % header.center(self.width))
+ self.cleared = 1 #: true if we haven't drawn the bar yet.
+
+ self.min = minValue
+ self.max = maxValue
+ self.span = maxValue - minValue
+ self.amount = 0 # When amount == max, we are 100% done
+
+ #initially it is 0
+ self.update(0, '')
+
+ def update(self, newAmount, message=''):
+ if newAmount < self.min: newAmount = self.min
+ if newAmount > self.max: newAmount = self.max
+ self.amount = newAmount
+
+ # Figure out the new percent done, round to an integer
+ diffFromMin = float(self.amount - self.min)
+ percentDone = (diffFromMin / float(self.span)) * 100.0
+ percentDone = round(percentDone)
+ percentDone = int(percentDone)
+
+ if self.cleared:
+ sys.stdout.write(self.header)
+ self.cleared = 0
+ n = int((self.width-10)*percentDone/100.0)
+ sys.stdout.write(
+ self.term.BOL + self.term.UP + self.term.CLEAR_EOL +
+ (self.bar % (percentDone, '='*n, '-'*(self.width-10-n))) +
+ self.term.CLEAR_EOL + message.center(self.width))
+
+ def clear(self):
+ if not self.cleared:
+ sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
+ self.term.UP + self.term.CLEAR_EOL +
+ self.term.UP + self.term.CLEAR_EOL)
+ self.cleared = 1
+
+if __name__ == "__main__":
+ import time
+ term = TerminalController()
+ progress = ProgressBar(term, 'Progress Status',minValue=0,maxValue=5)
+ filenames = ['this', 'that', 'other', 'foo', 'bar']
+
+ for i, filename in zip(range(len(filenames)), filenames):
+ progress.update(i+1)
+ time.sleep(3)
+
+ progress.update(5,"JOB_COMPLETED")
+ #progress.clear()
+
+#################### PROGRESS BAR ##################################
|
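
To illustrate the ordering note in is_public_valid_method() above: a hypothetical minion module that keeps its introspection code in a private helper, so the leading-underscore check short-circuits before getattr() can recurse. The module name and body are made up for illustration and are not part of this diff:

    # hypothetical module, only to illustrate the comment in is_public_valid_method()
    import func_module

    class Introspect(func_module.FuncModule):
        version = "0.0.1"
        api_version = "0.0.1"
        description = "illustrates the public/private split described above"

        def list_my_methods(self):
            # public entry point: a thin wrapper that is safe to getattr()
            return self._list_my_methods()

        def _list_my_methods(self):
            # private helper: the leading underscore means is_public_valid_method()
            # rejects it before ever calling getattr() on it
            return [m for m in dir(self) if not m.startswith("_")]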
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/dump.py
^
|
@@ -164,7 +164,7 @@
self.output(line)
def raiseToYamlSyntaxError(self):
- raise """
+ raise """
to_yaml should return tuple w/object to dump
and optional YAML type. Example:
({'foo': 'bar'}, '!!foobar')
@@ -193,25 +193,25 @@
occur[obid] = occur[obid] + 1
class YamlAnchors:
- def __init__(self,data):
- occur = {}
- accumulate(data,occur)
- anchorVisits = {}
- for (obid, occur) in occur.items():
- if occur > 1:
- anchorVisits[obid] = 0
- self._anchorVisits = anchorVisits
- self._currentAliasIndex = 0
- def shouldAnchor(self,obj):
- ret = self._anchorVisits.get(id(obj),None)
- if 0 == ret:
- self._currentAliasIndex = self._currentAliasIndex + 1
- ret = self._currentAliasIndex
- self._anchorVisits[id(obj)] = ret
- return ret
- return 0
- def isAlias(self,obj):
- return self._anchorVisits.get(id(obj),0)
+ def __init__(self,data):
+ occur = {}
+ accumulate(data,occur)
+ anchorVisits = {}
+ for (obid, occur) in occur.items():
+ if occur > 1:
+ anchorVisits[obid] = 0
+ self._anchorVisits = anchorVisits
+ self._currentAliasIndex = 0
+ def shouldAnchor(self,obj):
+ ret = self._anchorVisits.get(id(obj),None)
+ if 0 == ret:
+ self._currentAliasIndex = self._currentAliasIndex + 1
+ ret = self._currentAliasIndex
+ self._anchorVisits[id(obj)] = ret
+ return ret
+ return 0
+ def isAlias(self,obj):
+ return self._anchorVisits.get(id(obj),0)
### SORTING METHODS
@@ -295,7 +295,7 @@
def sloppyIsUnicode(data):
# XXX - hack to make tests pass for 2.1
- return repr(data)[:2] == "u'" and repr(data) != data
+ return repr(data)[:2] == "u'" and repr(data) != data
import sys
if sys.hexversion < 0x20200000:
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/load.py
^
|
@@ -20,7 +20,7 @@
self._docs = []
try:
while 1:
- self._docs.append(parser.next())
+ self._docs.append(parser.next())
except StopIteration: pass
self._idx = 0
def __len__(self): return len(self._docs)
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/ordered_dict.py
^
|
@@ -32,7 +32,3 @@
for key in data.keys():
print "The value for %s is %s" % (key, data[key])
print data
-
-
-
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/redump.py
^
|
@@ -19,4 +19,3 @@
dumper = Dumper()
dumper.alphaSort = 0
return dumper.dump(*docs)
-
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/stream.py
^
|
@@ -56,10 +56,10 @@
def eatNewLines(stream):
while 1:
- line = stream.get()
- if line is None or len(string.strip(line)):
- return line
-
+ line = stream.get()
+ if line is None or len(string.strip(line)):
+ return line
+
COMMENT_LINE_REGEX = re.compile(R"\s*#")
def isComment(line):
return line is not None and COMMENT_LINE_REGEX.match(line)
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/timestamp.py
^
|
@@ -29,28 +29,28 @@
siz = len(tpl)
sec = 0
if 3 == siz:
- tpl += [0,0,0,0,0,-1]
+ tpl += [0,0,0,0,0,-1]
elif 7 == siz:
- tpl.append(0)
- tpl.append(-1)
+ tpl.append(0)
+ tpl.append(-1)
elif 8 == siz:
- if len(tpl.pop()) > 0: raise ValueError(val)
- tpl.append(0)
- tpl.append(-1)
+ if len(tpl.pop()) > 0: raise ValueError(val)
+ tpl.append(0)
+ tpl.append(-1)
elif 9 == siz or 10 == siz:
- mn = int(tpl.pop())
- hr = int(tpl.pop())
- sec = (hr*60+mn)*60
- if val.find("+") > -1: sec = -sec
- if 10 == siz: tpl.pop()
- tpl.append(0)
- tpl.append(-1)
+ mn = int(tpl.pop())
+ hr = int(tpl.pop())
+ sec = (hr*60+mn)*60
+ if val.find("+") > -1: sec = -sec
+ if 10 == siz: tpl.pop()
+ tpl.append(0)
+ tpl.append(-1)
else:
- raise ValueError(val)
+ raise ValueError(val)
idx = 0
while idx < 9:
- tpl[idx] = int(tpl[idx])
- idx += 1
+ tpl[idx] = int(tpl[idx])
+ idx += 1
if tpl[1] < 1 or tpl[1] > 12: raise ValueError(val)
if tpl[2] < 1 or tpl[2] > 31: raise ValueError(val)
if tpl[3] > 24: raise ValueError(val)
@@ -69,16 +69,16 @@
class _timestamp:
def __init__(self,val=None):
if not val:
- self.__tval = time.gmtime()
+ self.__tval = time.gmtime()
else:
- typ = type(val)
- if ListType == typ:
- self.__tval = tuple(val)
- elif TupleType == typ:
- self.__tval = val
- else:
- self.__tval = _parseTime(val)
- if 9 != len(self.__tval): raise ValueError
+ typ = type(val)
+ if ListType == typ:
+ self.__tval = tuple(val)
+ elif TupleType == typ:
+ self.__tval = val
+ else:
+ self.__tval = _parseTime(val)
+ if 9 != len(self.__tval): raise ValueError
def __getitem__(self,idx): return self.__tval[idx]
def __len__(self): return 9
def strftime(self,format): return time.strftime(format,self.__tval)
@@ -103,7 +103,7 @@
_timestamp.__init__(self,val)
self.__mxdt = DateTime.mktime(self.__tval)
def __getattr__(self, name):
- return getattr(self.__mxdt, name)
+ return getattr(self.__mxdt, name)
except:
class timestamp(_timestamp): pass
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/func/yaml/ypath.py
^
|
@@ -64,7 +64,7 @@
def __setattr__(self,attname,attval):
if attname in ('parent','key','value'):
if self.__dict__.get(attname):
- raise ValueError("context is read-only")
+ raise ValueError("context is read-only")
self.__dict__[attname] = attval
def __hash__(self): return hash(self.path)
def __cmp__(self,other):
@@ -228,7 +228,7 @@
def __init__(self,key):
#TODO: Do better implicit typing
try:
- key = int(key)
+ key = int(key)
except: pass
self.key = key
def bind(self,cntx):
@@ -327,7 +327,7 @@
while 1:
cntx = self.rhs.next()
if str(cntx.value) == self.lhs: #TODO: Remove type hack
- return 1
+ return 1
except StopIteration: pass
return 0
def exists_segment(self,cntx):
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/init-scripts/funcd
^
|
@@ -41,6 +41,7 @@
RVAL=3
echo "$DAEMON is not running"
fi
+ return $RVAL
}
if [ -f /lib/lsb/init-functions ]; then
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/po/messages.pot
^
|
@@ -6,22 +6,29 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: func 0.24-1\n"
+"Project-Id-Version: func 0.28-1\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2008-12-17 15:46-0500\n"
+"POT-Creation-Date: 2011-04-07 17:18-0400\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
+"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=CHARSET\n"
"Content-Transfer-Encoding: 8bit\n"
-#: func/module_loader.py:106
+#: func/module_loader.py:120
#, python-format
-msgid "Could not load %s module: %s"
+msgid "Import error while loading %s module: %s"
msgstr ""
-#: func/module_loader.py:111
+#: func/module_loader.py:127
#, python-format
msgid "Could not load %s module"
msgstr ""
+
+#: func/minion/modules/hardware.py:118
+msgid ""
+"Import error while loading smolt module. Smolt is probably not installed. "
+"This method is useless without it."
+msgstr ""
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/scripts/func
^
|
@@ -25,7 +25,11 @@
myname, argv = sys.argv[0], sys.argv[1:]
cli = func_command.FuncCommandLine()
try:
- cli.parse(argv)
+ ret = cli.parse(argv)
+ if type(ret) == type(1):
+ sys.exit(ret)
+ else:
+ sys.exit(0)
except Func_Client_Exception, e:
print "ERROR:", e
sys.exit(1)
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-command
^
|
@@ -0,0 +1,85 @@
+#!/usr/bin/python -tt
+# by skvidal
+# gplv2+
+
+
+import sys
+import os
+import func.overlord.client
+from func.overlord.scripts import base_func_parser, handle_base_func_options, errorprint
+from func.utils import is_error
+
+
+def main(args):
+ parser = base_func_parser(outputpath=False)
+    parser.add_option('--returncodes', action='store_true', help="prefix each line with the command's returncode")
+ parser.add_option('-1', '--oneline', action='store_true', help="output all things as one line - to make grepping easier, will not remove \\n's from output of commands, though")
+ parser.add_option('-o', '--output-to-dir', dest='output_dest', default=None,
+ help="output each hosts results to a file in a dir named for the host")
+ opts, args = parser.parse_args(args)
+ opts = handle_base_func_options(parser, opts)
+
+ if len(args) < 1:
+ errorprint(parser.format_help())
+ return 1
+
+ mycmd = ' '.join(args)
+
+ hosts ='*'
+ if opts.host:
+ hosts = ';'.join(opts.host)
+
+ if opts.output_dest:
+ if opts.output_dest[0] != '/':
+ opts.output_dest = os.path.realpath(os.path.expanduser(opts.output_dest))
+ if not os.path.exists(opts.output_dest):
+ try:
+ os.makedirs(opts.output_dest)
+ except (IOError, OSError), e:
+ print >> sys.stderr, "Could not make dir %s: %s" % (opts.output_dest, e)
+ sys.exit(1)
+
+ if not os.access(opts.output_dest, os.W_OK):
+ print >> sys.stderr, "Cannot write to path %s" % opts.output_dest
+ sys.exit(1)
+
+ fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks, delegate=opts.delegate)
+
+ print mycmd
+ results = fc.command.run(mycmd)
+ for (hn, output) in results.items():
+ if is_error(output):
+ msg = 'Error: %s: ' % hn
+ for item in output[1:3]:
+ if type(item) == type(''):
+ msg += ' %s' % item
+ errorprint(msg)
+ continue
+
+ if opts.output_dest:
+ fo = open(opts.output_dest + '/' + hn+'.output', 'w')
+ fo.write(mycmd + '\n')
+ if opts.returncodes:
+ fo.write('%s:\nreturn code:%s\n%s\n' % (hn, output[0], output[1]))
+ else:
+ fo.write('%s:\n%s\n' % (hn,output[1]))
+ fo.close()
+ continue
+
+ if opts.oneline:
+ if opts.returncodes:
+ print '%s:%s:%s' % (hn, output[0], output[1])
+ else:
+ print '%s:%s' % (hn, output[1])
+ else:
+ if opts.returncodes:
+ print '%s:\nreturn code:%s\n%s' % (hn, output[0], output[1])
+ else:
+ print '%s:\n%s' % (hn, output[1])
+
+ if opts.output_dest:
+ print "output written to %s" % opts.output_dest
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-down-hosts
^
|
@@ -0,0 +1,36 @@
+#!/usr/bin/python -tt
+# by skvidal
+# gplv2+
+
+import sys
+import func.overlord.client
+from func.overlord.scripts import base_func_parser, handle_base_func_options, errorprint
+from func.utils import is_error
+
+
+def main(args):
+ parser = base_func_parser(outputpath=False, timeoutdef=10)
+ opts, args = parser.parse_args(args)
+ opts = handle_base_func_options(parser, opts)
+
+
+ hosts ='*'
+ if opts.host:
+ hosts = ';'.join(opts.host)
+
+ fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks)
+
+ results = fc.test.ping()
+ offline = []
+ for (hn, out) in results.items():
+ if out != 1:
+ offline.append(hn)
+
+ print '\n'.join(sorted(offline))
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
+
+
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-find-user
^
|
@@ -0,0 +1,73 @@
+#!/usr/bin/python -tt
+# by skvidal
+# gplv2+
+# return all the process owned or matching the username given
+
+
+
+import sys
+import func.overlord.client
+from func.utils import is_error
+from optparse import OptionParser
+
+def parse_args(args):
+ parser = OptionParser(version = "1.0")
+ parser.add_option('--host', default=[], action='append',
+ help="hosts to act on, defaults to ALL")
+ parser.add_option('--timeout', default=300, type='int',
+ help='set the wait timeout for func commands')
+ parser.add_option('--forks', default=40, type='int',
+ help='set the number of forks to start up')
+ parser.add_option('--hosts-from-file', default=None, dest="hostfile",
+ help="read list of hosts from this file, if '-' read from stdin")
+ (opts, args) = parser.parse_args(args)
+
+ if opts.hostfile:
+ hosts = []
+ if opts.hostfile == '-':
+ hosts = sys.stdin.readlines()
+ else:
+ hosts = open(opts.hostfile, 'r').readlines()
+
+ for hn in hosts:
+ hn = hn.strip()
+ if hn.startswith('#'):
+ continue
+ hn = hn.replace('\n', '')
+ opts.host.append(hn)
+
+
+ return opts, args, parser
+
+
+opts, args, parser = parse_args(sys.argv[1:])
+
+if len(args) < 1:
+ print parser.format_help()
+ sys.exit(1)
+
+username = args[0]
+
+hosts ='*'
+if opts.host:
+ hosts = ';'.join(opts.host)
+
+fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks)
+
+results = fc.process.grep(username)
+for (hn, output) in results.items():
+ if is_error(output):
+ msg = 'Error: %s: ' % hn
+ for item in output[1:3]:
+ if type(item) == type(''):
+ msg += ' %s' % item
+ print >> sys.stderr, msg
+ continue
+
+ for val in output.values():
+ for line in val:
+ if line.find('func') != -1: # if we're seeing the func process for any reason, skip it
+ continue
+ print '%s:%s' % (hn, line)
+
+
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-grep
^
|
@@ -0,0 +1,76 @@
+#!/usr/bin/python -tt
+# by skvidal
+# gplv2+
+
+
+import sys
+import func.overlord.client
+from optparse import OptionParser
+from func.utils import is_error
+
+def parse_args(args):
+ parser = OptionParser(version = "1.0")
+ parser.add_option('--host', default=[], action='append',
+ help="hosts to act on, defaults to ALL")
+ parser.add_option('--timeout', default=300, type='int',
+ help='set the wait timeout for func commands')
+ parser.add_option('--forks', default=40, type='int',
+ help='set the number of forks to start up')
+ parser.add_option('--grep-options', default='-n', dest='grep_options',
+ help='set options to pass to grep "-r -i" for example')
+ parser.add_option('--hosts-from-file', default=None, dest="hostfile",
+ help="read list of hosts from this file, if '-' read from stdin")
+ (opts, args) = parser.parse_args(args)
+
+ if opts.hostfile:
+ hosts = []
+ if opts.hostfile == '-':
+ hosts = sys.stdin.readlines()
+ else:
+ hosts = open(opts.hostfile, 'r').readlines()
+
+ for hn in hosts:
+ hn = hn.strip()
+ if hn.startswith('#'):
+ continue
+ hn = hn.replace('\n', '')
+ opts.host.append(hn)
+
+ return opts, args, parser
+
+
+opts, args, parser = parse_args(sys.argv[1:])
+
+if len(args) < 2:
+ print parser.format_help()
+ sys.exit(1)
+
+search_str = args[0]
+search_where = args[1:]
+
+hosts ='*'
+if opts.host:
+ hosts = ';'.join(opts.host)
+
+fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks)
+
+cmd = '/bin/grep %s %s %s' % (opts.grep_options, search_str, ' '.join(search_where))
+print cmd
+results = fc.command.run(cmd)
+for (hn, output) in results.items():
+ if is_error(output):
+ msg = 'Error: %s: ' % hn
+ for item in output[1:3]:
+ if type(item) == type(''):
+ msg += ' %s' % item
+ print >> sys.stderr, msg
+ continue
+
+
+ for line in output[1].split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+ print '%s:%s' % (hn, line)
+
+
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-group
^
|
@@ -0,0 +1,11 @@
+#!/usr/bin/python
+import sys
+from func.overlord.cmd_modules.group import Group
+
+file_name, argv = sys.argv[0], sys.argv[1:]
+cli = Group()
+try:
+ cli.parse(argv)
+except Exception, e:
+ print "ERROR:", e
+ sys.exit(1)
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-list-vms-per-host
^
|
@@ -0,0 +1,59 @@
+#!/usr/bin/python -tt
+# by skvidal
+# gplv2+
+
+import sys
+import func.overlord.client
+from optparse import OptionParser
+
+def parse_args(args):
+ parser = OptionParser(version = "1.0")
+ parser.add_option('--host', default=[], action='append',
+ help="hosts to act on, defaults to ALL")
+ parser.add_option('--timeout', default=30, type='int',
+ help='set the wait timeout for func commands')
+ parser.add_option('--forks', default=40, type='int',
+ help='set the number of forks to start up')
+ parser.add_option('--hosts-from-file', default=None, dest="hostfile",
+ help="read list of hosts from this file, if '-' read from stdin")
+ (opts, args) = parser.parse_args(args)
+
+ if opts.hostfile:
+ hosts = []
+ if opts.hostfile == '-':
+ hosts = sys.stdin.readlines()
+ else:
+ hosts = open(opts.hostfile, 'r').readlines()
+
+ for hn in hosts:
+ hn = hn.strip()
+ if hn.startswith('#'):
+ continue
+ hn = hn.replace('\n', '')
+ opts.host.append(hn)
+
+ return opts, args, parser
+
+
+opts, args, parser = parse_args(sys.argv[1:])
+hosts ='*'
+if opts.host:
+ hosts = ';'.join(opts.host)
+
+fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks)
+
+results = fc.system.list_modules()
+hosts_to_scan = []
+for (hn, mods) in results.items():
+ if 'virt' in mods:
+ hosts_to_scan.append(hn)
+
+fc = func.overlord.client.Client(';'.join(hosts_to_scan), timeout=opts.timeout, nforks=len(hosts_to_scan))
+results = fc.virt.info()
+
+for (hn, vms) in sorted(results.items()):
+ for (vm, info) in sorted(vms.items()):
+ if vm == 'Domain-0':
+ continue
+ print '%s:%s:%s' % (hn, vm, info['state'])
+
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-ps-compare
^
|
@@ -0,0 +1,71 @@
+#!/usr/bin/python -tt
+# by skvidal
+# gplv2+
+
+import sys
+import func.overlord.client as fclient
+from optparse import OptionParser
+from func.utils import is_error
+
+
+def get_host_list(hosts):
+ fc = fclient.Client(hosts)
+ host_list = fc.minions_class.get_all_hosts() # grumble
+ return host_list
+
+
+def parse_args(args):
+ parser = OptionParser(version = "1.0")
+ parser.add_option('--timeout', default=10, type='int',
+ help='set the wait timeout for func commands')
+ (opts, args) = parser.parse_args(args)
+
+ return opts, args, parser
+
+
+opts, extcmds, parser = parse_args(sys.argv[1:])
+
+
+if len(extcmds) != 2:
+ print("func-ps-compare hostname1 hostname2")
+ print("Must specify exactly two hosts to compare")
+ sys.exit(1)
+
+hosts = ';'.join(extcmds)
+host_list = get_host_list(hosts)
+
+if len(host_list) != 2:
+ print("Must specify exactly two hosts to compare, hosts found: %s" % ' '.join(host_list))
+ sys.exit(1)
+
+host1 = host_list[0]
+host2 = host_list[1]
+
+fc = fclient.Client(hosts, timeout=opts.timeout, nforks=2)
+results = fc.process.info("axw")
+
+processes = {}
+for n in [host1, host2]:
+ processes[n] = set([])
+ if is_error(results[n]):
+ print 'Error from %s' % n
+        print results[n]
+ sys.exit(1)
+
+ for items in results[n]:
+ if not items:
+ continue
+ comm = ' '.join(items[4:])
+ processes[n].add(comm)
+
+host1diff = processes[host1].difference(processes[host2])
+host2diff = processes[host2].difference(processes[host1])
+
+print 'Processes running on %s not on %s' % (host1, host2)
+print '\n'.join(host1diff)
+
+print '\nProcesses running on %s not on %s' % (host2, host1)
+print '\n'.join(host2diff)
+
+
+
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/scripts/func-transmit
^
|
@@ -42,6 +42,7 @@
import func.overlord.func_command as func_command
import func.overlord.client as fc
+from func.overlord.command import FuncOptionParser
import func.yaml as yaml
import func.CommonErrors
@@ -82,7 +83,7 @@
def main(argv):
# load input parameters
- parser = optparse.OptionParser()
+ parser = FuncOptionParser(version=True)
parser.add_option("-j","--json",
help="Json Parser",
action="store_true",
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-whatsmyname
^
|
@@ -0,0 +1,58 @@
+#!/usr/bin/python -tt
+
+
+import sys
+import func.overlord.client
+from func.utils import is_error
+from optparse import OptionParser
+
+def parse_args(args):
+ parser = OptionParser(version = "1.0")
+ parser.add_option('--host', default=[], action='append',
+ help="hosts to act on, defaults to ALL")
+ parser.add_option('--timeout', default=10, type='int',
+ help='set the wait timeout for func commands')
+ parser.add_option('--forks', default=40, type='int',
+ help='set the number of forks to start up')
+ parser.add_option('--hosts-from-file', default=None, dest="hostfile",
+ help="read list of hosts from this file, if '-' read from stdin")
+ (opts, args) = parser.parse_args(args)
+
+ if opts.hostfile:
+ hosts = []
+ if opts.hostfile == '-':
+ hosts = sys.stdin.readlines()
+ else:
+ hosts = open(opts.hostfile, 'r').readlines()
+
+ for hn in hosts:
+ hn = hn.strip()
+ if hn.startswith('#'):
+ continue
+ hn = hn.replace('\n', '')
+ opts.host.append(hn)
+
+ return opts, args, parser
+
+opts, args, parser = parse_args(sys.argv[1:])
+
+hosts ='*'
+if opts.host:
+ hosts = ';'.join(opts.host)
+
+fc = func.overlord.client.Client(hosts, timeout=opts.timeout, nforks=opts.forks)
+results = fc.command.run('/bin/hostname -f')
+for (hn, output) in results.items():
+ if is_error(output):
+ msg = 'Error: %s: ' % hn
+ for item in output[1:3]:
+ if type(item) == type(''):
+ msg += ' %s' % item
+ print >> sys.stderr, msg
+ continue
+
+ myname = output[1][:-1]
+ if hn != myname:
+ print "mismatch puppetname:%s does not match host known name: %s" % (hn, myname)
+
+
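The --hosts-from-file handling above (read a file or stdin, skip '#' comments, strip newlines) is repeated verbatim in func-yum below. A small shared helper would remove the duplication; this is a suggested sketch, not code that exists in the tree:

    import sys

    def read_host_file(path):
        """Return host names from 'path' ('-' means stdin), skipping
        blank lines and '#' comments."""
        if path == '-':
            lines = sys.stdin.readlines()
        else:
            fo = open(path, 'r')
            lines = fo.readlines()
            fo.close()
        hosts = []
        for line in lines:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            hosts.append(line)
        return hosts

    # In parse_args():
    #     if opts.hostfile:
    #         opts.host.extend(read_host_file(opts.hostfile))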
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/scripts/func-yum
^
|
@@ -0,0 +1,665 @@
+#!/usr/bin/python -tt
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2010 Red Hat, Inc
+# Written By Seth Vidal - skvidal@fedoraproject.org
+
+#func yum overlord script
+
+
+# TODO:
+# install ....
+# remove ....
+# push custom module over func and activate
+# config file
+# --test mode/options
+# add arbitrary args to a command (include, exclude, etc)
+# needs restarting
+# is running kernel latest installed (needs reboot?)
+# get list of repos
+# some kind of locking mechanism - so we hold off hitting a request on a host that's already doing something
+# maybe that means client-local locking on the minion-side.
+
+
+
+import sys
+import os
+import time
+import stat
+import re
+import glob
+from optparse import OptionParser
+import func.overlord.client as fclient
+from func.utils import is_error
+from certmaster.config import read_config
+
+
+class FYError(Exception):
+ def __init__(self, value=None):
+ Exception.__init__(self)
+ self.value = value
+ def __str__(self):
+ return "%s" %(self.value,)
+
+
+def parse_time(s):
+ MULTS = {'w': 60 * 60 * 24 * 7, 'd': 60 * 60 * 24, 'h' : 60 * 60, 'm' : 60, 's': 1}  # 'w' needed so the default --clean-older-than of 2W parses
+
+
+ if s[-1].isalpha():
+ n = s[:-1]
+ unit = s[-1].lower()
+ mult = MULTS.get(unit, None)
+ if not mult:
+ raise ValueError("unknown unit '%s'" % unit)
+ else:
+ n = s
+ mult = 1
+
+ try:
+ n = float(n)
+ except (ValueError, TypeError), e:
+ raise ValueError('invalid value')
+
+ if n < 0:
+ raise ValueError("seconds value may not be negative")
+
+ return int(n * mult)
+
+def errorprint(msg):
+ print >> sys.stderr, msg
+
+def parse_args(args):
+ basecmds = ('update', 'getinfo', 'status', 'install', 'remove', 'list',
+ 'custom', 'clean', 'search', 'compare')
+
+ usage = """func-yum [options] [command]
+commands: \n %s""" % '\n '.join(sorted(basecmds))
+ parser = OptionParser(version = "func-yum 1.0", usage=usage)
+ parser.add_option('--host', default=[], action='append',
+ help="hosts to act on, defaults to ALL")
+ parser.add_option('--hosts-from-file', default=None, dest="hostfile",
+ help="read list of hosts from this file, if '-' read from stdin")
+ parser.add_option('--timeout', default=5000, type='int',
+ help='set the wait timeout for func commands')
+ parser.add_option('--short-timeout', default=5, type='int', dest='short_timeout',
+ help='set the short timeout wait for connecting to hosts')
+ parser.add_option('--forks', default=60, type='int',
+ help='set the number of forks to start up')
+ parser.add_option('-q','--quiet', default=False, action='store_true',
+ help='only output what you asked for')
+ parser.add_option('--store-custom-as', default=None, dest='store_custom_as',
+ help='store the custom command output as this keyname')
+ parser.add_option('--clean-older-than', default='2W', dest='clean_older_than',
+ help='data stored which is older than this will be cleaned up.')
+ parser.add_option('--outputpath', default='/var/cache/func-yum/', dest='outputpath',
+ help="path where func-yum's cache will be stored")
+
+ (opts, args) = parser.parse_args(args)
+
+ if not args:
+ print parser.format_help()
+ sys.exit(1)
+
+ if args[0] not in basecmds:
+ print parser.usage
+ sys.exit(1)
+
+ if opts.outputpath[-1] != '/':
+ opts.outputpath = opts.outputpath + '/'
+
+
+ if opts.hostfile:
+ hosts = []
+ if opts.hostfile == '-':
+ hosts = sys.stdin.readlines()
+ else:
+ hosts = open(opts.hostfile, 'r').readlines()
+
+ for hn in hosts:
+ hn = hn.strip()
+ if hn.startswith('#'):
+ continue
+ hn = hn.replace('\n', '')
+ opts.host.append(hn)
+
+
+ return opts, args
+
+def filter_hosts(hosts, opts):
+ """returns two lists: online and offline hosts"""
+
+ online = []
+ offline = []
+ fc = fclient.Client(hosts, timeout=opts.short_timeout, nforks=opts.forks)
+ results = fc.test.ping()
+ for (hn, out) in results.items():
+ if out != 1:
+ offline.append(hn)
+ else:
+ online.append(hn)
+
+ if not online:
+ errorprint("No hosts online after filter to access")
+ errorprint("Offline Hosts: %s" % ' '.join(offline))
+ sys.exit(1)
+
+ return online, offline
+
+
+def _write_data(basepath, data_key, data_val, timestamp=None, make_latest=True, error=False):
+ """take data and output it into a location, mark it as the latest, too"""
+ if not timestamp:
+ timestamp = time.strftime('%Y-%m-%d-%H:%M:%S')
+ # someplace/$hostname/data_key/timestamp and then symlinked to 'latest'
+
+ dn = basepath + '/' + data_key
+ latest = dn + '/latest'
+ latesterror = dn + '/latest-error'
+
+ if not os.path.exists(dn):
+ os.makedirs(dn)
+
+ fn = dn + '/' + timestamp
+ if error:
+ fn += '-error'
+
+ fo = open(fn, 'w')
+ if type(data_val) == type([]):
+ for line in data_val:
+ if line.strip():
+ fo.write(line + '\n')
+ else:
+ if data_val.strip():
+ fo.write(data_val)
+ fo.flush()
+ fo.close()
+
+ if make_latest:
+ if os.path.exists(latest):
+ os.unlink(latest)
+ os.symlink(fn, latest)
+
+ if error:
+ if os.path.exists(latesterror):
+ os.unlink(latesterror)
+ os.symlink(fn, latesterror)
+
+ return latest
+
+def _wait_for_async(fc, opts, jobid):
+ finished = False
+ last_completed = -1
+ results = {}
+ while not finished:
+ (jobstatus, info) = fc.job_status(jobid)
+ if type(info) != type({}):
+ completed = 0
+ else:
+ completed = len(info.keys())
+
+ if not opts.quiet:
+ if completed != last_completed:
+ print '%s/%s hosts finished' % (completed, len(fc.minions))
+ last_completed = completed
+
+ if jobstatus == 1:
+ finished=True
+ results = info
+ break
+ time.sleep(5)
+
+ return results
+
+def _confirm_on_change(basecmd, extcmds, hosts):
+ print 'Preparing to run: %s %s' % (basecmd, ' '.join(extcmds))
+ print 'Running on:\n %s' % '\n '.join(sorted(hosts))
+ try:
+ junk = raw_input('If not okay, ctrl-c now, else press enter now')
+ except KeyboardInterrupt, e:
+ print "\n\nExiting"
+ sys.exit(0)
+
+def store_info(fc, opts):
+ # retrieve info to outputpath/$hostname/installed/timestamp
+
+ # ping the box first - if it fails - move on.
+
+ now = time.strftime('%Y-%m-%d-%H:%M:%S')
+ #results = fc.rpms.inventory()# would like to use inventory, but no can do,
+ # until I fix/check the module problems on python 2.4
+
+ errors = []
+ # installed pkgs
+ results = fc.command.run('rpm -qa')
+ data_key = 'installed'
+ for (hn, output) in results.items():
+ error = False
+ if is_error(output):
+ errors.append('Error getting installed from %s' % hn)
+ error = True
+
+ basepath = opts.outputpath + hn
+ data_val = sorted(output[1].split('\n'))
+ _write_data(basepath, data_key, data_val, timestamp=now, error=error)
+
+ # available updates
+ results = fc.yumcmd.check_update()
+ data_key = 'updates'
+ for (hn, output) in results.items():
+ error = False
+ if is_error(output):
+ errors.append('Error getting updates from %s' % hn)
+ error = True
+
+ basepath = opts.outputpath + hn
+ data_val = sorted(output)
+ _write_data(basepath, data_key, data_val, timestamp=now, error=error)
+
+ # orphaned/extras pkgs
+ # fixme - make this not a command.run but something in yumcmd
+ results = fc.command.run('/usr/bin/package-cleanup -q --orphans')
+ data_key = 'orphans'
+ for (hn, output) in results.items():
+ error = False
+ if is_error(output):
+ errors.append('Error getting orphans from %s' % hn)
+ error = True
+
+ basepath = opts.outputpath + hn
+ data_val = output[1]
+ _write_data(basepath, data_key, data_val, timestamp=now, error=error)
+
+
+ # get yum list-security if we can
+ results = fc.command.run('/usr/bin/yum list-security')
+ data_key = 'security-updates'
+ for (hn, output) in results.items():
+ error = False
+ if is_error(output):
+ errors.append('Error getting security-list from %s' % hn)
+ error = True
+
+ basepath = opts.outputpath + hn
+ res = []
+ for line in output[1].split('\n'):
+ if line.startswith('Loaded plugins'):
+ continue
+ if line.startswith('list-security'):
+ continue
+ res.append(line)
+ data_val = res
+ _write_data(basepath, data_key, data_val, timestamp=now, error=error)
+
+ # get the needs_restarting code over to the clients and generate that list
+ # as well
+
+ return errors
+
+def update(fc, opts, pkg_list):
+ errors = []
+ pkg_str = None
+ if pkg_list:
+ pkg_str = ' '.join(pkg_list)
+ if pkg_str:
+ jobid = fc.yumcmd.update(pkg_str)
+ else:
+ jobid = fc.yumcmd.update()
+
+ results = _wait_for_async(fc, opts, jobid)
+
+ data_key = 'updated'
+ for (hn, output) in results.items():
+ error = False
+ if is_error(output):
+ errors.append('Error updating %s' % hn)
+ error = True
+
+ basepath = opts.outputpath + hn
+ data_val = output
+ res = _write_data(basepath, data_key, data_val, error=error)
+ if not opts.quiet: print 'outputted results for %s to:\n %s' % (hn, res)
+ return errors
+
+def custom(fc, opts, args):
+ errors = []
+ fullcmd = ''
+ if args[0][0] != '/':
+ fullcmd += '/usr/bin/yum '
+ fullcmd += '%s' % ' '.join(args)
+
+ print fullcmd
+ data_key = 'custom'
+ if opts.store_custom_as:
+ data_key = opts.store_custom_as
+
+ results = fc.command.run(fullcmd)
+ for (hn, output) in results.items():
+ error = False
+ if is_error(output):
+ errors.append('Error running custom command: %s on %s' % (fullcmd, hn))
+ error = True
+
+ data_val = fullcmd + '\n'
+ data_val += output[1]
+ basepath = opts.outputpath + hn
+
+ res = _write_data(basepath, data_key, data_val, error=error)
+ if not opts.quiet: print 'outputted results for %s to:\n %s' % (hn, res)
+ return errors
+
+def return_status(hosts, opts):
+
+ # needs updates
+ # last updates applied on
+ # num pkgs installed
+ # num orphans
+ # last time inventory was gotten
+ status = {}
+ for hn in hosts:
+ if hn not in status:
+ status[hn] = {'last_check': None,
+ 'latest_updated': None,
+ 'num_updates':'unknown',
+ 'num_installed': 'unknown',
+ 'num_orphans': 'unknown'}
+ hnstats = status[hn]
+ mypath = opts.outputpath+hn
+ if os.path.exists(mypath + '/installed/latest'):
+ hnstats['last_check'] = os.stat(mypath +'/installed/latest')[stat.ST_MTIME]
+ if os.path.exists(mypath + '/updated/latest'):
+ hnstats['latest_updated'] = os.stat(mypath +'/updated/latest')[stat.ST_MTIME]
+ if os.path.exists(mypath + '/updates/latest'):
+ hnstats['num_updates'] = len(open(mypath + '/updates/latest').readlines())
+ if os.path.exists(mypath + '/installed/latest'):
+ hnstats['num_installed'] = len(open(mypath + '/installed/latest').readlines())
+ if os.path.exists(mypath + '/orphans/latest'):
+ hnstats['num_orphans'] = len(open(mypath + '/orphans/latest').readlines())
+
+ return status
+
+def _convert_date_to_relative(now, then):
+ """return a time relative to now of the timestamp (then)"""
+
+ if not then:
+ return 'Never'
+
+ difftime = now - then
+
+ if difftime > 86400*28:
+ return "LONG LONG AGO: %s" % str(time.strftime('%Y-%m-%d-%H:%M', time.localtime(then)))
+
+ if difftime > 86400*7: # weeks
+ weeks = difftime / (86400*7)
+ return "%s weeks ago" % int(weeks)
+
+ if difftime > 86400: #days
+ days = difftime / 86400
+ return "%s days ago" % int(days)
+
+ if difftime > 3600: #hours
+ hours = difftime / 3600
+ return "%s hours ago" % int(hours)
+
+ if difftime > 60: #minutes
+ mins = difftime / 60
+ return "%s minutes ago" % int(mins)
+
+ # difftime is <= 60 at this point; return unconditionally so the
+ # function never falls through and returns None
+ return "Just Now"
+
+
+
+
+def return_info(hn, opts, infotype=None, as_list=False):
+ if not infotype:
+ raise FYError, "No Infotype specified"
+
+ if (not os.path.exists(opts.outputpath + '/' + hn) or
+ not os.path.exists(opts.outputpath + '/' + hn + '/' + infotype) or
+ not os.path.exists(opts.outputpath + '/' + hn + '/' + infotype + '/latest')):
+ msg = 'info of type: %s not available for: %s\n' % (infotype,hn)
+ raise FYError, msg
+
+ fo = open(opts.outputpath + '/' + hn + '/' + infotype + '/latest', 'r')
+ if as_list:
+ info = fo.readlines()
+ else:
+ info = fo.read()
+ fo.close()
+ return info
+
+
+def search(hosts, opts, search_str, target=None):
+ results = {} # hostname = [target: matched line]
+ re_obj = re.compile(search_str)
+ if not target:
+ target=['*']
+ elif type(target) == type(''):
+ target = [target]
+ for hn in hosts:
+ for tgt in target:
+ fns = glob.glob(opts.outputpath + '/' + hn + '/' + tgt + '/latest')
+ for fn in fns:
+ thistarget = fn.replace('/latest', '')
+ thistarget = thistarget.replace(opts.outputpath + '/' + hn + '/', '')
+ for r in open(fn, 'r').readlines():
+ if re_obj.search(r):
+ if hn not in results:
+ results[hn] = []
+ results[hn].append('%s:%s' % (thistarget, r.strip()))
+ return results
+
+def get_host_list(hosts):
+ fc = fclient.Client(hosts)
+ host_list = fc.minions_class.get_all_hosts() # grumble
+ return host_list
+
+def main(args):
+
+ opts, args = parse_args(args)
+ basecmd = args[0]
+ extcmds = args[1:]
+ hosts = '*'
+ if opts.host:
+ hosts = ';'.join(opts.host)
+
+
+
+ if basecmd == 'getinfo':
+
+ hosts, offline = filter_hosts(hosts, opts)
+ getinfo_forks = len(hosts) # gives us a slight advantage on an expensive operation
+ fc = fclient.Client(';'.join(hosts), timeout=opts.timeout, nforks=getinfo_forks)
+ errors = store_info(fc, opts)
+ if not opts.quiet:
+ print 'stored info for:'
+ for h in sorted(hosts):
+ print ' %s' % h
+
+ print 'offline hosts:'
+ for h in sorted(offline):
+ print ' %s' % h
+
+ for error in errors:
+ errorprint(' %s' % error)
+
+ elif basecmd == 'update':
+ hosts, offline = filter_hosts(hosts, opts)
+
+ fc = fclient.Client(';'.join(hosts), timeout=opts.timeout, nforks=opts.forks, async=True)
+ _confirm_on_change(basecmd, extcmds, hosts)
+
+ errors = update(fc, opts, extcmds)
+ for error in errors:
+ errorprint(' %s' % error)
+ if not opts.quiet:
+ print 'updating stored info for updated hosts'
+ fc = fclient.Client(';'.join(hosts), timeout=opts.timeout, nforks=opts.forks)
+ errors = store_info(fc, opts) # get latest info for the hosts
+ for error in errors:
+ errorprint(' %s' % error)
+
+
+
+ elif basecmd == 'status':
+ host_list = get_host_list(hosts)
+ now = time.time()
+ status = return_status(host_list, opts)
+ for hn in sorted(status.keys()):
+ msg = """%s:
+ Last checked: %s
+ Last update run: %s
+ Updates available: %s
+ Installed pkgs: %s
+ Orphaned Pkgs: %s
+ """ % (hn, _convert_date_to_relative(now, status[hn]['last_check']),
+ _convert_date_to_relative(now, status[hn]['latest_updated']),
+ status[hn]['num_updates'], status[hn]['num_installed'],
+ status[hn]['num_orphans'])
+ print msg
+
+
+ elif basecmd == 'list':
+ host_list = get_host_list(hosts)
+ extopts = ['installed', 'updates', 'orphans', 'security-updates',
+ 'updated', 'with-security', 'with-updates']
+ if len(extcmds) == 0:
+ errorprint("specify %s" % ' '.join(extopts))
+ return 1
+
+ for item in extcmds:
+ if item in ['installed', 'updates', 'orphans', 'updated', 'security-updates']:
+ for hn in sorted(host_list):
+ try:
+ info = return_info(hn,opts, item)
+ except FYError, e:
+ errorprint(str(e))
+ else:
+ if item == 'updates': # don't print out empty update results
+ if not info:
+ continue
+ print '%s %s:' % (hn, item)
+ print info
+ print ''
+
+ elif item.startswith('with-'):
+ if item == 'with-security':
+ item = 'with-security-updates'
+ item_name = item.replace('with-', '')
+
+ hwu = {}
+ for hn in sorted(host_list):
+ res = []
+ try:
+ this_list = return_info(hn, opts, item_name, as_list=True)
+ except FYError, e:
+ res.append(str(e))
+ continue
+
+ for line in this_list:
+ if re.match('\s*(#|$)', line):
+ continue
+ res.append(line)
+
+ if res:
+ hwu[hn]=len(res)
+
+ for h,num in sorted(hwu.items()):
+ print '%s %s : %s' % (item_name, h, num)
+ else:
+ for hn in sorted(host_list):
+ try:
+ info = return_info(hn,opts, item)
+ except FYError, e:
+ continue
+ print '%s %s' % (hn, item)
+ print info
+ print ''
+
+ elif basecmd == 'custom':
+ hosts, offline = filter_hosts(hosts, opts)
+ fc = fclient.Client(';'.join(hosts), timeout=opts.timeout, nforks=opts.forks)
+ _confirm_on_change(basecmd, extcmds, hosts)
+ errors = custom(fc, opts, extcmds)
+ for error in errors:
+ errorprint(' %s' % error)
+
+ elif basecmd == 'clean':
+ host_list = get_host_list(hosts)
+ extopts = ['old-data', 'downed-hosts', 'empty-hosts']
+ if len(extcmds) == 0:
+ errorprint("specify %s" % ' '.join(extopts))
+ return 1
+
+ for item in extcmds:
+ if item == 'old-data':
+ pass
+ elif basecmd == 'search':
+ host_list = get_host_list(hosts)
+ if not extcmds:
+ errorprint("search searchstring [where to search: installed, updates, updated]")
+ errorprint("must specify at least what to search for")
+ return 1
+ search_str = extcmds[0]
+ if len(extcmds) > 1:
+ search_target = extcmds[1:]
+ else:
+ search_target = None
+ results = search(host_list, opts, search_str, target=search_target)
+ for hn in sorted(results.keys()):
+ for i in sorted(results[hn]):
+ print '%s:%s' % (hn, i)
+
+ elif basecmd == 'compare':
+ if len(extcmds) != 2:
+ errorprint("func-yum compare hostname1 hostname2")
+ errorprint("Must specify exactly two hosts to compare")
+ return 1
+ hosts = ';'.join(extcmds)
+ host_list = get_host_list(hosts)
+ if len(host_list) != 2:
+ errorprint("Must specify exactly two hosts to compare, hosts found: %s" % ' '.join(host_list))
+ return 1
+ host1 = host_list[0]
+ host2 = host_list[1]
+ try:
+ host1_inst = set(return_info(host1, opts, 'installed', as_list=True))
+ host2_inst = set(return_info(host2, opts, 'installed', as_list=True))
+ except FYError, msg:
+ errorprint("Error: %s" % msg)
+ return 1
+ host1diff = host1_inst.difference(host2_inst)
+ host2diff = host2_inst.difference(host1_inst)
+ print 'Packages on %s not on %s' % (host1, host2)
+ print ''.join(host1diff)
+ print 'Packages on %s not on %s' % (host2, host1)
+ print ''.join(host2diff)
+
+ else:
+ errorprint('command %s not implemented yet' % basecmd)
+ return 1
+
+
+ # install # pkg
+ # remove # pkg pkg pkg
+
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
+
+
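All of func-yum's state lives in the layout _write_data creates: <outputpath>/<hostname>/<data_key>/<timestamp>, with a 'latest' symlink (and 'latest-error' on failures) pointing at the newest snapshot; return_info and return_status only ever read through the symlinks. A minimal sketch of reading that cache from outside the script, using the default --outputpath and a made-up host name:

    import os

    CACHE = '/var/cache/func-yum/'      # func-yum's default --outputpath

    def latest(host, key):
        """Return the newest stored value for (host, key), or None if
        'getinfo' has never been run for that host."""
        path = os.path.join(CACHE, host, key, 'latest')
        if not os.path.exists(path):
            return None
        fo = open(path)
        data = fo.read()
        fo.close()
        return data

    # e.g. pending update count from the last 'func-yum getinfo' run:
    pending = latest('minion1.example.com', 'updates')
    if pending is not None:
        print('%d updates pending' % len(pending.splitlines()))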
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/setup.py
^
|
@@ -4,7 +4,7 @@
#from setuptools import setup,find_packages
NAME = "func"
-VERSION = open("version", "r+").read().split()[0]
+VERSION = "0.28"
SHORT_DESC = "%s remote configuration and management api" % NAME
LONG_DESC = """
A small pluggable xml-rpc daemon used by %s to implement various web services hooks
@@ -12,60 +12,77 @@
if __name__ == "__main__":
-
- manpath = "share/man/man1/"
- etcpath = "/etc/%s" % NAME
- etcmodpath = "/etc/%s/modules" % NAME
- initpath = "/etc/init.d/"
- logpath = "/var/log/%s/" % NAME
- varpath = "/var/lib/%s/" % NAME
- rotpath = "/etc/logrotate.d"
- aclpath = "%s/minion-acl.d" % etcpath
- setup(
- name="%s" % NAME,
- version = VERSION,
- author = "Lots",
- author_email = "func-list@redhat.com",
- url = "https://hosted.fedoraproject.org/projects/func/",
- license = "GPL",
- scripts = [
- "scripts/funcd",
- "scripts/func",
- "scripts/func-inventory",
- "scripts/func-create-module",
- "scripts/func-transmit",
- "scripts/func-build-map"
- ],
- package_dir = {"%s" % NAME: "%s" % NAME
- },
- packages = ["%s" % NAME,
- "%s/minion" % NAME,
- "%s/overlord" % NAME,
- "%s/overlord/cmd_modules" % NAME,
- "%s/overlord/modules" % NAME,
- "%s/minion/modules" % NAME,
- "%s/yaml" % NAME,
- # FIXME if there's a clean/easy way to recursively
- # find modules then by all means do it, for now
- # this will work.
- "%s/minion/modules.netapp" % NAME,
- "%s/minion/modules.netapp.vol" % NAME,
- "%s/minion/modules.iptables" % NAME
- ],
- data_files = [(initpath, ["init-scripts/funcd"]),
- (etcpath, ["etc/minion.conf"]),
- (etcpath, ["etc/async_methods.conf"]),
- (manpath, ["docs/func.1.gz"]),
- (manpath, ["docs/func-inventory.1.gz"]),
- (manpath, ["docs/funcd.1.gz"]),
- (manpath, ["docs/func-transmit.1.gz"]),
- (rotpath, ['etc/func_rotate']),
- (logpath, []),
- (etcmodpath, ['etc/Test.conf']),
- (varpath, []),
- (aclpath, [])
- ],
- description = SHORT_DESC,
- long_description = LONG_DESC
- )
+ manpath = "share/man/man1/"
+ etcpath = "/etc/%s" % NAME
+ etcmodpath = "/etc/%s/modules" % NAME
+ initpath = "/etc/init.d/"
+ logpath = "/var/log/%s/" % NAME
+ varpath = "/var/lib/%s/" % NAME
+ rotpath = "/etc/logrotate.d"
+ aclpath = "%s/minion-acl.d" % etcpath
+ setup(
+ name="%s" % NAME,
+ version = VERSION,
+ author = "Lots",
+ author_email = "func-list@redhat.com",
+ url = "https://fedorahosted.org/func/",
+ license = "GPL",
+ scripts = [
+ "scripts/funcd",
+ "scripts/func",
+ "scripts/func-group",
+ "scripts/func-inventory",
+ "scripts/func-create-module",
+ "scripts/func-transmit",
+ "scripts/func-build-map",
+ "scripts/func-command",
+ "scripts/func-down-hosts",
+ "scripts/func-find-user",
+ "scripts/func-grep",
+ "scripts/func-list-vms-per-host",
+ "scripts/func-ps-compare",
+ "scripts/func-whatsmyname",
+ "scripts/func-yum",
+ ],
+ package_dir = {"%s" % NAME: "%s" % NAME
+ },
+ packages = ["%s" % NAME,
+ "%s/minion" % NAME,
+ "%s/overlord" % NAME,
+ "%s/overlord/cmd_modules" % NAME,
+ "%s/overlord/modules" % NAME,
+ "%s/overlord/group" % NAME,
+ "%s/minion/modules" % NAME,
+ "%s/minion/facts" % NAME,
+ "%s/minion/facts/modules/" % NAME,
+ "%s/yaml" % NAME,
+ # FIXME if there's a clean/easy way to recursively
+ # find modules then by all means do it, for now
+ # this will work.
+ "%s/minion/modules.netapp" % NAME,
+ "%s/minion/modules.netapp.vol" % NAME,
+ "%s/minion/modules.iptables" % NAME
+ ],
+ data_files = [(initpath, ["init-scripts/funcd"]),
+ (etcpath, ["etc/minion.conf",
+ "etc/overlord.conf",
+ "etc/async_methods.conf",
+ "etc/version"]),
+ (manpath, ["docs/func.1.gz",
+ "docs/func-inventory.1.gz",
+ "docs/funcd.1.gz",
+ "docs/func-transmit.1.gz",
+ "docs/func-create-module.1.gz",
+ "docs/func-build-map.1.gz"]),
+ (rotpath, ['etc/func_rotate']),
+ (logpath, []),
+ (etcmodpath, ['etc/Test.conf',
+ 'etc/Bridge.conf',
+ 'etc/Vlan.conf']),
+ (varpath, []),
+ (aclpath, [])
+ ],
+ description = SHORT_DESC,
+ long_description = LONG_DESC
+ )
|
[-]
[+]
|
Added |
func-0.28.tar.bz2/test/test-confmgt_augeas.py
^
|
@@ -0,0 +1,292 @@
+#!/usr/bin/env python
+
+
+import sys
+import func.overlord.client as fc
+c = fc.Client("*")
+
+print c.confmgt_augeas.getenv('AUGEAS_ROOT')
+
+def chroottest():
+ envdict={}
+ print 'Get the env. variable AUGEAS_ROOT'
+ for host,envlist in c.confmgt_augeas.getenv('AUGEAS_ROOT').iteritems():
+ print "Host: "+host
+ augroot=envlist.get('AUGEAS_ROOT')
+ print 'AUGEAS_ROOT: '+augroot
+ envdict[host]=augroot
+ if augroot == '/' or augroot == '(none)':
+ print "The node "+host+" is not chrooted with AUGEAS_ROOT"
+ print "You should consider setting this variable"
+ print "before launching this test."
+ sys.exit()
+ print
+
+ print 'Prepare the test environment'
+ for host in envdict:
+ augroot=envdict[host]
+ client = fc.Client(host)
+ print client.command.run('mkdir -p '+augroot+'/etc/ssh')
+ print client.command.run('cp /etc/hosts '+augroot+'/etc')
+ print client.command.run('cp -r /etc/ssh/* '+augroot+'/etc/ssh')
+ print
+
+chroottest()
+
+
+def basictest():
+ #print 'Backup sshd_config'
+ #print c.confmgt_augeas.backup('/etc/ssh/sshd_config')
+ #print
+
+ print 'Delete the Parameter PermitRootLogin in sshd_config'
+ print c.confmgt_augeas.rm('/etc/ssh/sshd_config','PermitRootLogin')
+ print
+
+ print 'Delete the Parameter Port in sshd_config with an Augeas-style path'
+ print c.confmgt_augeas.rm('/etc/ssh/sshd_config/Port')
+ print
+
+ print 'Get sshd_config Port value.'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config','Port')
+ print
+
+ print 'Set Port to 22 in sshd_config'
+ print c.confmgt_augeas.set('/etc/ssh/sshd_config','Port','22')
+ print
+
+ print 'Get sshd_config Port value.'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config','Port')
+ print
+
+ print 'Try to delete a non-existent parameter in sshd_config'
+ print c.confmgt_augeas.rm('/etc/ssh/sshd_config','Nawak')
+ print
+
+ print 'Try to delete a parameter in a non-existent file.'
+ print c.confmgt_augeas.rm('/etc/ssh/nimp','Nawak')
+ print
+
+ print 'Get sshd_config PermitRootLogin value.'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config','PermitRootLogin')
+ print
+
+ print 'Set PermitRootLogin to yes in sshd_config'
+ print c.confmgt_augeas.set('/etc/ssh/sshd_config','PermitRootLogin','yes')
+ print
+
+ print 'Set PermitRootLogin to no in sshd_config with an Augeas-style path.'
+ print c.confmgt_augeas.set('/etc/ssh/sshd_config/PermitRootLogin','','no')
+ print
+
+ print 'Set PermitRootLogin to yes in sshd_config with an Augeas-style path.'
+ print c.confmgt_augeas.set('/etc/ssh/sshd_config/PermitRootLogin','','yes')
+ print
+
+ print 'Get sshd_config PermitRootLogin value.'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config','PermitRootLogin')
+ print
+
+ print 'Get sshd_config PermitRootLogin value with an Augeas-style path.'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config/PermitRootLogin')
+ print
+
+ print 'Attempt to get a value for a non-existent param in sshd_config'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config','Nawak')
+ print
+
+ print 'Attempt to get a value for an empty param in sshd_config'
+ print c.confmgt_augeas.get('/etc/ssh/sshd_config','Subsystem')
+ print
+
+ print 'Search for conf. entry in hosts file with canonical hostname = localhost'
+ print c.confmgt_augeas.match('/etc/hosts','canonical','localhost')
+ print
+
+ #print 'List all direct children of hosts (not very useful)'
+ #print c.confmgt_augeas.ls('/etc/hosts/*')
+ #print
+
+ print 'List all direct children parameters of 1st hosts entry.'
+ for host,paramlist in c.confmgt_augeas.ls('/etc/hosts/1').iteritems():
+ print "Host: "+host
+ if type(paramlist) == type({}):
+ for node in paramlist['nodes']:
+ print node[0]+" = "+node[1]
+ else:
+ print paramlist
+ print
+
+ print 'List all children nodes of 1st hosts entry.'
+ for host,paramlist in c.confmgt_augeas.printconf('/etc/hosts/1').iteritems():
+ print "Host: "+host
+ if type(paramlist) == type({}):
+ for node in paramlist['nodes']:
+ print node[0]+" = "+node[1]
+ else:
+ print paramlist
+ print
+
+ print 'Get values of 1st host entry.'
+ print c.confmgt_augeas.get('/etc/hosts/','1')
+ print
+
+ print 'List all values for parameter of 1st fstab entry.'
+ minionDict=c.confmgt_augeas.ls('/etc/fstab/1')
+ for host,entry in minionDict.iteritems():
+ print "Host: "+host
+ if type(entry) == type({}):
+ print "Entry path: "+entry['path']
+ for node in entry['nodes']:
+ print node[0]+" = "+node[1]
+ else:
+ print entry
+ print
+
+ print 'Get ipaddr of /etc/hosts 1st entry.'
+ print c.confmgt_augeas.get('/etc/hosts/1','ipaddr')
+ print
+ #
+ #print 'List all direct children parameters of sshd_config'
+ #for host,paramlist in c.confmgt_augeas.ls('/etc/ssh/sshd_config').iteritems():
+ # print "Host: "+host
+ # for node in paramlist['nodes']:
+ # print node[0]+" = "+node[1]
+ #print
+ #
+ print 'List all children nodes of sshd_config'
+ for host,paramlist in c.confmgt_augeas.printconf('/etc/ssh/sshd_config').iteritems():
+ print "Host: "+host
+ for node in paramlist['nodes']:
+ print node[0]+" = "+node[1]
+ print
+
+ print 'List all direct children of AcceptEnv entries in sshd_config'
+ for host,paramlist in c.confmgt_augeas.ls('/etc/ssh/sshd_config/AcceptEnv').iteritems():
+ print "Host: "+host
+ if type(paramlist)==type({}):
+ for node in paramlist['nodes']:
+ print node[0]+" = "+node[1]
+ else:
+ print paramlist
+ print
+
+ print 'See all AcceptEnv entries in sshd_config'
+ for host,paramlist in c.confmgt_augeas.printconf('/etc/ssh/sshd_config/AcceptEnv').iteritems():
+ print "Host: "+host
+ if type(paramlist)==type({}):
+ for node in paramlist['nodes']:
+ print node[0]+" = "+node[1]
+ else:
+ print paramlist
+ print
+
+ print 'Try to match PermitRootLogin yes in sshd_config'
+ print c.confmgt_augeas.match('/etc/ssh/sshd_config','PermitRootLogin','yes')
+ print
+
+ print 'Try to match PermitRootLogin yes in sshd_config with an Augeas-style path'
+ print c.confmgt_augeas.match('/etc/ssh/sshd_config/PermitRootLogin','','yes')
+ print
+
+ print 'Try to match PermitRootLogin yes in some config. files.'
+ print c.confmgt_augeas.match('/etc/*/*','PermitRootLogin','yes')
+ print
+
+ print 'Try to match AcceptEnv in sshd_config'
+ print c.confmgt_augeas.match('/etc/ssh/sshd_config','AcceptEnv')
+ print
+
+ print 'Try to match PermitRootLogin in sshd_config'
+ print c.confmgt_augeas.match('/etc/ssh/sshd_config','PermitRootLogin')
+ print
+
+ print 'Try to match PermitRootLogin in sshd_config with an Augeas-style path.'
+ print c.confmgt_augeas.match('/etc/ssh/sshd_config/PermitRootLogin')
+ print
+
+ print 'Try to match canonical entries in hosts file.'
+ print c.confmgt_augeas.match('/etc/hosts','canonical')
+ print
+
+ print 'Try to match canonical entries in hosts file with an Augeas-style path.'
+ print c.confmgt_augeas.match('/etc/hosts/*/canonical')
+ print
+
+ print 'Augeas metainformation.'
+ print c.confmgt_augeas.ls('/','/augeas')
+ print c.confmgt_augeas.get('/','save','/augeas')
+
+ #Not supposed to work:
+ print c.confmgt_augeas.set('/','save','backup','/augeas')
+ print c.confmgt_augeas.set('/save','','backup','/augeas')
+
+ print c.confmgt_augeas.get('/save','','/augeas')
+ print c.confmgt_augeas.get('/files/etc/hosts/lens','info','/augeas')
+
+
+
+ print 'Add a new variable FOO at the end of the last AcceptEnv line of sshd_config'
+ print "And we don't want to do this twice."
+ foomatch=c.confmgt_augeas.match('/etc/ssh/sshd_config','AcceptEnv/*','FOO')
+ for host,matchlist in foomatch.iteritems():
+ if not matchlist:
+ client = fc.Client(host)
+ print client.confmgt_augeas.set('/etc/ssh/sshd_config/AcceptEnv[last()]','10000','FOO')
+ print
+
+
+ print 'Change the (canonical) hostname associated to a specific IP in hosts file.'
+ hostfile='/etc/hosts'
+ ip='1.2.3.4'
+ newCanonical='fozzie'
+ #newCanonical='piggy'
+ # We search which entry in /etc/hosts refers to the IP
+ ipmatch = c.confmgt_augeas.match(hostfile,'ipaddr',ip)
+ # for each minion concerned
+ for host,entry in ipmatch.iteritems():
+ # The first and unique entry in the list, entry[0], is what we searched for
+ # We check that the target canonical hostname is not already set
+ if (type(entry) == type([]) and entry):
+ oldCanonical=c.confmgt_augeas.get(entry[0],'canonical')[host]['value']
+ if oldCanonical != newCanonical:
+ print c.confmgt_augeas.set(entry[0],'canonical',newCanonical)
+ else:
+ print 'Nothing to do'
+ else:
+ print repr(entry)+' - no match'
+ print
+
+basictest()
+
+# Extended path syntax
+def extendedtest():
+ print 'Tests on extended paths'
+ # not working:
+ print c.confmgt_augeas.match('//error/descendant-or-self::*','/augeas')
+ print
+
+ print c.confmgt_augeas.get('/etc/hosts/*[ipaddr = "127.0.0.1"]/canonical')
+ print c.confmgt_augeas.get('/etc/hosts/*[ipaddr = "127.0.0.1"]/','canonical')
+ print
+
+ print c.confmgt_augeas.get("/etc//ipaddr[. = '127.0.0.1']")
+ print
+
+ print c.confmgt_augeas.match('/etc/hosts/*/ipaddr')
+ print c.confmgt_augeas.match('/etc/hosts/*/','ipaddr')
+
+ # not working:
+ #print c.confmgt_augeas.printconf('/etc/hosts/*/','ipaddr')
+ #print
+
+ print c.confmgt_augeas.match('/etc/pam.d/*[.//module="pam_limits.so"]')
+ print
+
+ # not working (wrong):
+ print c.confmgt_augeas.match('/etc//1')
+
+ # not working (wrong):
+ print c.confmgt_augeas.match('/descendant-or-self::4')
+
+extendedtest()
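Every call in this test returns a per-minion dict, and the chained lookups (for example confmgt_augeas.get(...)[host]['value']) assume each minion answered with the nested dict those lookups expect. A small guard keeps the rest of a run going when one minion returns something else; the reply shape assumed here is only what the test itself relies on, not a documented API guarantee:

    def per_minion_value(results, key='value'):
        """Yield (minion, value) pairs, skipping minions whose reply is
        not the expected nested dict."""
        for minion, reply in results.iteritems():
            if isinstance(reply, dict) and key in reply:
                yield minion, reply[key]
            else:
                print 'skipping %s: unexpected reply %r' % (minion, reply)

    # e.g.:
    # for host, port in per_minion_value(c.confmgt_augeas.get('/etc/ssh/sshd_config', 'Port')):
    #     print '%s Port = %s' % (host, port)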
|
[-]
[+]
|
Changed |
func-0.28.tar.bz2/test/test_func_db.py
^
|
@@ -3,7 +3,7 @@
from func import jobthing
import pprint
import time
-
+
def generate_word(type_word,how_many):
"""
generating some test fuzzy words
@@ -15,10 +15,10 @@
#what you can choose
ALLOWED_CHARS = ('*','-','_',';','@','.')
- #may add more later
+ #may add more later
ALLOWED_GLOB_WORDS = ("w-e-i-r-d","foo.com","zoom","@group1","g.r.o.u.p.2","some-hey-com","some_hack.org","1212-32323_blippy-zorg","interesting*;o*n*e")
ALLOWED_MODULE_METHOD_W = ("some_service","s-o-m-e","s_u_m_m_er","FOOO","FoFo","real_stupid-w-e-i-r-d-naME")
-
+
final_word = ""
pickup = []
@@ -34,7 +34,7 @@
else:
final_word = "".join([final_word,word])
- #return the final word back
+ #return the final word back
return final_word
@@ -114,13 +114,13 @@
def test_get_status(self):
#__get_status(jobid)
self.enter_some_data(self.new_jobids)
-
+
for job in self.new_jobids:
for job_id,job_pack in job.iteritems():
result = jobthing.__dict__['__get_status'](job_id)
assert job_pack == result
-
+
def enter_some_data(self,data):
"""
We need that one because every func here uses it at the initial stage
@@ -136,11 +136,11 @@
def create_an_old_jobid(self):
#will be overriden
pass
-
+
def create_new_jobid(self):
#will be overriden
pass
-
+
class TestOverlordDB(BaseFuncDB):
def __init__(self):
@@ -181,16 +181,16 @@
#the stres words :)
gw=generate_word(choice,4)
pack[choice] = gw
-
+
return get_formated_jobid(**pack)
-
+
def create_an_old_jobid(self,base_time):
new_time = self.create_new_jobid().split("-")
new_time[len(new_time)-1]=str(base_time)
return "-".join(new_time)
-
+
def test_old_new_upgrade(self):
#that will do some control if some users has old_ids and
@@ -203,7 +203,7 @@
tmp_hash = {}
tmp_hash[job_id] = (self.status_opt[randint(0,len(self.status_opt)-1)],{"some_old_type_new.com":self.test_result})
old_type_new.append(tmp_hash)
-
+
#create 5 old type job ids with older time
base_time = self.an_old_time
for n in xrange(5):
@@ -212,7 +212,7 @@
tmp_hash[job_id] = (self.status_opt[randint(0,len(self.status_opt)-1)],{"some_old_type.com":self.test_result})
old_type_old.append(tmp_hash)
base_time = base_time + 10
-
+
#print "The old type pack is : ",old_type_new
#print "The old type pack is : ",old_type_old
#enter also that ids into database
@@ -223,7 +223,7 @@
self.enter_some_data(self.new_jobids)
self.enter_some_data(self.old_jobids)
-
+
#db_results = jobthing.get_open_ids()
#print "The current db results are : ",db_results
@@ -236,7 +236,7 @@
for job_id,job_pack in job.iteritems():
assert db_results.has_key(job_id) == True
assert db_results[job_id] == job_pack[0]
-
+
def access_update_stress(self):
"""
@@ -251,7 +251,7 @@
print "Entering data test is over"
def access_delete_stress(self):
-
+
if not self.new_jobids or not self.old_jobids:
self.create_lots_of_ids(how_many,self.new_jobids,"old")
self.create_lots_of_ids(how_many,self.old_jobids,"new")
@@ -260,7 +260,7 @@
print "Old ids were removed succesfully "
def create_lots_of_ids(self,how_many,to_object,type_id):
- #generates lots of weird named
+ #generates lots of weird named
import time
base_time = self.an_old_time
for new_id in xrange(how_many):
@@ -282,7 +282,7 @@
class TestMinionDB(BaseFuncDB):
-
+
def __init__(self):
super(TestMinionDB,self).__init__()
self.status_opt = [jobthing.JOB_ID_RUNNING,jobthing.JOB_ID_FINISHED,jobthing.JOB_ID_PARTIAL,jobthing.JOB_ID_LOST_IN_SPACE,jobthing.JOB_ID_REMOTE_ERROR]
|
[-]
[+]
|
Changed |
version
^
|
@@ -1 +1 @@
-0.24 1
+0.28
|