r5735 - in trunk/gnue-common/src/formatting: . masks


From: jcater
Subject: r5735 - in trunk/gnue-common/src/formatting: . masks
Date: Fri, 16 Apr 2004 18:42:53 -0500 (CDT)

Author: jcater
Date: 2004-04-16 18:42:52 -0500 (Fri, 16 Apr 2004)
New Revision: 5735

Added:
   trunk/gnue-common/src/formatting/masks/
   trunk/gnue-common/src/formatting/masks/Errors.py
   trunk/gnue-common/src/formatting/masks/FormatMask.py
   trunk/gnue-common/src/formatting/masks/InputMask.py
   trunk/gnue-common/src/formatting/masks/MaskParser.py
   trunk/gnue-common/src/formatting/masks/Masks.py
   trunk/gnue-common/src/formatting/masks/Tokens.py
   trunk/gnue-common/src/formatting/masks/test.py
Log:
moved masks into gnue-common

Added: trunk/gnue-common/src/formatting/masks/Errors.py
===================================================================
--- trunk/gnue-common/src/formatting/masks/Errors.py    2004-04-16 23:41:18 UTC (rev 5734)
+++ trunk/gnue-common/src/formatting/masks/Errors.py    2004-04-16 23:42:52 UTC (rev 5735)
@@ -0,0 +1,38 @@
+#
+# This file is part of GNU Enterprise.
+#
+# GNU Enterprise is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2, or (at your option) any later version.
+#
+# GNU Enterprise is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this program; see the file COPYING. If not,
+# write to the Free Software Foundation, Inc., 59 Temple Place
+# - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright 2001-2004 Free Software Foundation
+#
+# FILE:
+# Errors.py
+#
+# DESCRIPTION:
+"""
+Exceptions specific to the mask system
+"""
+# NOTES:
+#
+
+class MaskError(StandardError):
+  pass
+
+class MaskDefinitionError(MaskError):
+  pass
+
+class InvalidInputCharacter(MaskError):
+  pass
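
For reference, a minimal sketch of how these exceptions surface (the mask string is
only an example; it uses the InputMask class added below in this commit):

from Errors import MaskDefinitionError, InvalidInputCharacter
from InputMask import InputMask

try:
  # A mask consisting only of quoted literals has no character tokens
  mask = InputMask('"total"')
except MaskDefinitionError, msg:
  print "Bad mask definition:", msg
# InvalidInputCharacter is raised later, while text is fed into an existing mask.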

Added: trunk/gnue-common/src/formatting/masks/FormatMask.py
===================================================================

Added: trunk/gnue-common/src/formatting/masks/InputMask.py
===================================================================
--- trunk/gnue-common/src/formatting/masks/InputMask.py 2004-04-16 23:41:18 UTC (rev 5734)
+++ trunk/gnue-common/src/formatting/masks/InputMask.py 2004-04-16 23:42:52 UTC (rev 5735)
@@ -0,0 +1,470 @@
+#
+# This file is part of GNU Enterprise.
+#
+# GNU Enterprise is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2, or (at your option) any later version.
+#
+# GNU Enterprise is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this program; see the file COPYING. If not,
+# write to the Free Software Foundation, Inc., 59 Temple Place
+# - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright 2001-2004 Free Software Foundation
+#
+# FILE:
+# InputMask.py
+#
+# DESCRIPTION:
+"""
+Input masks for GNUe Forms, et al
+Based on lex/yacc parsing (via Plex)
+"""
+# NOTES:
+#
+
+from gnue.common.external.plex import *
+
+from cStringIO import StringIO
+import string
+import MaskParser
+from Errors import *
+import Tokens
+
+class InputMask:
+  """
+  All functions return a tuple containing:
+    (displayed representation, cursor position)
+
+  or can raise InvalidInputCharacter
+  """
+
+  # Handle via gConfig?
+  placeholder = "_"
+
+
+  ####################################################################
+  #
+  #  Editing calls
+  #
+  def begin(self, value=None):
+    """
+    Resets the mask processor.
+    """
+    self.text = ""
+    self.cursor = 0
+    self.waitingForRTL = self.rtl_pos
+    components = [None]*len(self.tokens)
+    self._parseInput(self.text)
+    return self._adjustCursor(0)
+
+  def add(self, text, replace=0):
+    """
+    Insert text character
+    """
+    disp = ""
+    emptyDisplay = self.emptyDisplay
+    inputted = self.inputted
+    tokens = self.tokens
+    last_token = len(tokens) - 1
+    i = 0
+    print
+    print "Cursor=%s" % self.cursor
+    while i <= last_token:
+      if isinstance(self.tokens[i], Tokens.LitTok):
+        if len(disp) < self.cursor:
+          disp += emptyDisplay[i]
+      else:
+        if self.inputted[i]:
+          disp += inputted[i]
+      i += 1
+
+    print "DISP=%s" % disp
+    newtxt = disp[:self.cursor] + text + disp[self.cursor + replace:]
+    print "NEW=%s" % newtxt
+    print newtxt
+    self._parseInput(newtxt)
+    print self.cursor + len(text) - replace
+    if text:
+      return self._adjustCursor(self.cursor + len(text) - replace)
+    else:
+      return (self.display, self.cursor)
+
+  def delete(self):
+    """
+    Delete forwards
+    """
+    return self.add("", 1)
+
+  def backspace(self):
+    """
+    Delete backwards
+    """
+    cursor = self.cursor
+    display, cursor2 = self._adjustCursor(self.cursor-1, True)
+    if cursor != cursor2:
+      return self.add("", 1)
+    else:
+      # Cursor could not move; keep the documented (display, cursor) contract
+      return (display, cursor2)
+
+
+  def move(self, pos):
+    """
+    Move the cursor to a new position.
+    Usually results from a mouse click.
+    pos is a physical cursor position; the
+    internal code calculates the logical
+    position and, hence, the actual new
+    physical position.
+    """
+    return self._adjustCursor(pos)
+
+
+  def moveLeft(self):
+    """
+    Move the cursor left one character. The
+    internal code calculates the logical
+    position and, hence, the actual new
+    physical position.
+    """
+    return self._adjustCursor(self.cursor-1, True)
+
+  def moveRight(self):
+    """
+    Move the cursor right one character. The
+    internal code calculates the logical
+    position and, hence, the actual new
+    physical position.
+    """
+    return self._adjustCursor(self.cursor+1)
+
+  def moveHome(self):
+    """
+    Move the cursor to the beginning of
+    the text. The internal code calculates
+    the logical position and, hence, the
+    actual new physical position.
+    """
+    return self._adjustCursor(0)
+
+  def moveEnd(self):
+    """
+    Move the cursor to the end of the
+    text. The internal code calculates
+    the logical position and, hence,
+    the actual new physical position.
+    """
+    return self._adjustCursor(len(self.display))
+
+
+  ##
+  ##
+  ##
+  def _adjustCursor(self, pos, left=False):
+    """
+    Moves the cursor to a new position.
+    """
+
+    if pos < 0:
+      pos = 0
+
+    print "Pos=%s" % pos
+    print "Adjusting cursor to %s" % pos
+
+    rpos = 0
+    token_at = 0
+    tokens = self.tokens
+    last_token = len(tokens) - 1
+    while rpos < pos and token_at < last_token:
+      rpos += len(self.actualDisplay[token_at])
+      token_at += 1
+
+    if rpos > pos:
+      # This can happen if a token is partially complete
+      token_at -= 1
+    elif rpos + len(self.inputted[token_at]) < pos:
+      # This can happen at the end of the string
+      pos = rpos + len(self.inputted[token_at])
+
+    print "Token at %s, pos=%s, rpos=%s" % (token_at, pos, rpos)
+
+    if left:
+      while token_at > 0 and isinstance(self.tokens[token_at],Tokens.LitTok):
+        pos -= len(self.emptyDisplay[token_at])
+        token_at -= 1
+    else:
+      while token_at < last_token and \
+            isinstance(self.tokens[token_at],Tokens.LitTok):
+        pos += len(self.emptyDisplay[token_at])
+        token_at += 1
+
+
+    print "Deciding on %s" % pos
+    self.cursor = pos
+    return (self.display, pos)
+
+
+  ##
+  ##
+  ##
+  def _parseInput(self, newtext=""):
+    """
+    Parses an input string into its components
+    and sets the resultant display
+    """
+
+    cursor = self.cursor
+
+    tokens = self.tokens
+    inputted = [""] * len(self.tokens)
+    inputted_states = []
+    for f in range(len(self.tokens)):
+      inputted_states.append([])
+
+    first_state = 0
+    last_state = -1
+    if newtext:
+      scanner = Scanner(self.lexicon, StringIO(newtext), newtext)
+      try:
+        while True:
+          parsed, extra = scanner.read()
+          if parsed is None:
+            last_state = self.eof_nextstate[0]
+            break
+          else:
+#            print parsed
+            state, char = parsed
+            mstate = state[0]
+            inputted_states[mstate].append(state)
+            inputted[mstate] += char
+            if first_state is None:
+              first_state = mstate
+
+      except Errors.PlexError, msg:
+        raise InvalidInputCharacter, msg
+
+
+    # Calculate the last token's position (including any literals)
+    numtokens = len(self.tokens)
+    last_token = numtokens - 1
+    last_pos = last_token
+
+    while last_pos > 0 and not inputted[last_pos]:
+      last_pos -= 1
+
+    if last_pos < last_token and isinstance(tokens[last_pos+1], Tokens.LitTok):
+      last_pos += 1
+      while last_pos < last_token and \
+            isinstance(tokens[last_pos], Tokens.LitTok):
+        last_pos += 1
+
+
+    # Wait until after any exceptions are raised
+    # before storing state variables (in case input
+    # was invalid, we still have the old, valid input
+    # to refer to for future cursor movement).
+    self.inputted = inputted
+    self.last_state = last_state
+    self.first_state = first_state
+    self.last_pos = last_pos
+
+    #
+    # Calculate the displayed text, with any placeholders
+    #
+    disp = ""
+    emptyDisplay = self.emptyDisplay
+    actualDisplay = self.actualDisplay = []
+    for i in range(len(self.tokens)):
+      if isinstance(tokens[i], Tokens.LitTok):
+        actualDisplay.append(emptyDisplay[i])
+        inputted[i] = ""
+      else:
+        if not inputted[i]:
+          actualDisplay.append(emptyDisplay[i])
+        else:
+          if i == last_state:
+            # The last input token may not be complete,
+            # so add any extra _ placeholders.
+            actualDisplay.append(inputted[i] + emptyDisplay[i][len(inputted[i]):])
+          else:
+            oi = inputted[i]
+            ni = tokens[i].getProperDisplay(inputted[i])
+            self.cursor += len(ni) - len(oi)
+            inputted[i] = ni
+            actualDisplay.append(inputted[i])
+
+    self.display = string.join(actualDisplay,"")
+
+    return (self.display, self.cursor)
+
+
+
+  ####################################################################
+  #
+  #  Internal lexicon init crap
+  #
+
+  def __init__(self, mask, numeric=False, date=False):
+    parser = MaskParser.InputMaskParser(StringIO(mask),'inline', numeric, date)
+
+    self.isnumeric = numeric
+    self.isdate = date
+    self.tokens = tokens = []
+
+    # List of all tokens. Note that all {#}
+    # expansions have already happened.
+    ptokens = parser.tokens
+
+    # If non-zero, position of the right-to-left token
+    rtl_pos = self.rtl_pos = parser.rtl_pos
+
+    # text, numeric, or date
+    self.type = parser.type
+
+    validationRule = None
+
+    # This contains a list of each token's "empty" marker,
+    # which will usually be '_' or, if a literal, the literal's
+    # value.
+    self.emptyDisplay = []
+
+    #
+    # Process each returned parser token
+    # and convert it into a mask token.
+    #
+    i = 0
+    while i < len(ptokens):
+      ptoken=ptokens[i]
+      if isinstance(ptoken ,MaskParser.Literal):
+        chars = ""
+        # Merge consecutive literals into one rule
+        # (makes for easier logic)
+        while i < len(ptokens) and isinstance(ptokens[i], MaskParser.Literal):
+          chars += ptokens[i].token
+          i += 1
+        token = Tokens.tLiteral(chars)
+        i -= 1 # Because we add one later...
+      elif isinstance(ptoken ,MaskParser.TokenSet):
+        if ptoken.numeric:
+          token = Tokens.tCustomNum(ptoken.token)
+        else:
+          token = Tokens.tCustom(ptoken.token)
+      else:
+        token = Tokens.tokenMap[ptoken.token]()
+
+      # Honor force_upper/lower
+      try:
+        if ptoken.force_upper:
+          token.force_upper = True
+        elif ptoken.force_lower:
+          token.force_lower = True
+      except AttributeError:
+        pass
+
+      # Save token
+      tokens.append(token)
+
+      # Calculate "empty" displayed value
+      if token.maskchar:
+        self.emptyDisplay.append(token.maskchar)
+      elif token.autochar:
+        self.emptyDisplay.append(token.autochar)
+      else:
+        self.emptyDisplay.append(self.placeholder*token.maxchars)
+
+      i += 1
+
+    #
+    # Next, we will build the actual lexicon. We start
+    # at the end of the mask and work backwards, as
+    # any optional mask tokens will need to reference the
+    # next token's initial grammar elements.
+    #
+    i = len(tokens)
+    lexicon = [
+       # The first rule will always be Bol (to init stuff)
+       State("", [(Bol, Begin((0,0,0))) ]),
+       # The final rule prevents any trailing characters
+       # The Eof is just a dummy rule that won't ever be matched.
+       State((i,0,0), [(Eof, IGNORE)]) ]
+
+    last_leadin = []
+    leadin = []
+
+    while i > 0:
+      i -= 1
+      token = tokens[i]
+      if not token.optional:
+        leadin = []
+
+
+      j = 0
+      for ruleset in token.paths:
+        ks = 0
+        possibly_completed = False
+        for k in range(len(ruleset)):
+          path = ruleset[k]
+          lexi = []
+          try:
+            possibly_completed = possibly_completed or \
+                                 ruleset[k+1]==Tokens.forcible
+          except IndexError:
+            pass
+
+          if not path == Tokens.forcible:
+
+            if (k < len(ruleset) - 1):
+              next_state = (i, j, ks+1)
+            else:
+              next_state = (i+1,0,0)
+            rule = (path,
+               lambda p, t, c=self._tokenFound, st=(i, j, ks), ns=next_state:
+                  c(p, t, st, ns))
+
+            if k == 0:
+              leadin.append(rule)
+            lexi.append(rule)
+            if possibly_completed:
+              lexi += last_leadin
+            if j or ks:
+              lexicon.append((State((i, j, ks), lexi)))
+            ks += 1
+
+
+        j += 1
+
+      lexicon.append(State((i,0,0), leadin[:]))
+
+      last_leadin = leadin
+
+##     Tokens.printLexiconTree(lexicon)
+
+    # Create a consolidated validation rule so we
+    # can test if the inputted string is "complete".
+    self.validationRule = Tokens.buildValidationRule(tokens)
+
+    # Pre-compile the lexicon for this mask
+    DEBUG=StringIO()
+    self.lexicon = Lexicon(lexicon, DEBUG)
+    DEBUG.seek(0)
+##    print DEBUG.read()
+
+  def _tokenFound(self, parser, text, curstate, nextstate):
+    """
+    Called when an input character is found.
+    """
+    parser.produce((curstate,text))
+    parser.begin(nextstate)
+    self.eof_nextstate = nextstate
+
+
+class EOF:
+  """
+  Internal class used to return an EOF to our input loop.
+  """
+  def __init__(self, state):
+    self.state = state
\ No newline at end of file
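
For orientation, a sketch of the editing call sequence this class expects (run from
the masks/ directory as test.py is; the mask string and the example display value are
illustrative, and the work-in-progress debug prints above will also emit output):

import gnue                      # as in test.py below
from InputMask import InputMask

mask = InputMask('M/D/Y')
display, cursor = mask.begin()   # e.g. '__/__/____' with the cursor at 0
display, cursor = mask.add('1')  # insert a typed character at the cursor
display, cursor = mask.moveRight()
display, cursor = mask.delete()  # delete forwards; backspace() deletes backwards
display, cursor = mask.moveEnd()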

Added: trunk/gnue-common/src/formatting/masks/MaskParser.py
===================================================================
--- trunk/gnue-common/src/formatting/masks/MaskParser.py        2004-04-16 23:41:18 UTC (rev 5734)
+++ trunk/gnue-common/src/formatting/masks/MaskParser.py        2004-04-16 23:42:52 UTC (rev 5735)
@@ -0,0 +1,321 @@
+#
+# This file is part of GNU Enterprise.
+#
+# GNU Enterprise is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2, or (at your option) any later version.
+#
+# GNU Enterprise is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this program; see the file COPYING. If not,
+# write to the Free Software Foundation, Inc., 59 Temple Place
+# - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright 2001-2004 Free Software Foundation
+#
+# FILE:
+# MaskParser.py
+#
+# DESCRIPTION:
+"""
+Parse mask definitions into tokens that the Mask class can use.
+"""
+# NOTES:
+#
+
+from gnue.common.external.plex import *
+import string
+from Errors import *
+
+digit = Any(string.digits)
+
+class BaseToken:
+  """
+  Basic parser class. Not used directly,
+  but inherited by Literal, Token, etc.
+  """
+  numeric=False
+  date=False
+  text=False
+  literal=False
+  token=False
+
+  def __init__(self, t1, t2=None, *args):
+    if t2:
+      self.token = t2
+    else:
+      self.token = t1
+
+class Token(BaseToken):
+  """
+  Base token. (as opposed to literal, etc)
+  """
+  force_lower = False
+  force_upper = False
+  token=True
+
+class NumberToken(Token):
+  """
+  Numeric token (#9-,.)
+  """
+  numeric = True
+
+class DateToken(Token):
+  """
+  Date token (MDYyHIS:/)
+  """
+  date=True
+
+class TextToken(Token):
+  """
+  Text token
+  """
+  text = True
+
+class TokenSet(Token):
+  """
+  Token defined by user with [] notation.
+  Can behave like a NumberToken or TextToken,
+  depending on contents of [].
+  """
+  def __init__(self, token, *args):
+    # TODO: Expand the set
+    # Are we all-numeric?
+    self.numeric = True
+    self.token = token
+    for t in token:
+      if not t in string.digits:
+        self.numeric = False
+    if not self.numeric:
+      self.text = True
+
+
+class Literal(BaseToken):
+  """
+  A literal string that the developer wants in the string.
+  Note that for our purposes, the basic "literals" aren't
+  really Literals(), but special cases of Token classes.
+  So all literals represented by this class are denoted
+  with \ or "" syntaxes.
+  """
+  literal=True
+
+
+class RightToLeft(BaseToken):
+  """
+  Temporary token class used to note the
+  position of ! modifiers
+  """
+  numeric = True
+
+class CaseModifier:
+  """
+  Temporary token class used to record > and <
+  markers. These cause the modified token to have
+  either force_upper or force_lower set, so the
+  other classes won't ever see CaseModifier
+  instances.
+  """
+  pass
+
+class Repeater:
+  """
+  Temporary token class used to record {#}
+  markers. These are replaced with the actual
+  represented tokens before being passed out
+  of MaskParser (i.e., 0{3} would be returned
+  as 000), so the other classes won't ever see
+  Repeater instances.
+  """
+  def __init__(self, count):
+    self.count = count
+
+##
+##
+##
+class InputMaskParser(Scanner):
+  def getType(self):
+    """
+    Returns the apparent type of this mask. One of text, numeric, or date.
+    """
+    return self.type
+
+  def getTokens(self):
+    return self.tokens[:]
+
+  #
+  # Private stuff
+  #
+  def _check_single(self, text):
+    if text in self.__singles:
+      raise Errors.UnrecognizedInput(self, 'Mask can only have one "%s" token' % text)
+    self.__singles.append(text)
+    if text == '!':
+      self.produce (RightToLeft(text))
+    elif text in '.+,':
+      self.produce(NumberToken(text))
+    else:
+      self.produce(TextToken(text))
+
+  def _literal(self, text):
+    self.produce(Literal(text))
+
+  def _literal_2nd(self, text):
+    return self._literal(text[1:])
+
+  def _escape(self, text):
+    self.begin('')
+    self.produce(Literal(text))
+
+  def _repeater(self, text):
+    self.produce(Repeater(int(text)))
+
+  def _begin_set(self, text):
+    self.begin('set')
+    self._set = ""
+
+  def _add_set(self, text):
+    self._set += text
+
+  def _add_set_2nd(self, text):
+    return self._add_set(text[1:])
+
+  def _end_set(self, text):
+    self.begin('')
+    self.produce(TokenSet(self._set))
+
+  # Basic lexicon used by both input and output masks
+  _lexicon = [
+      State('escape',  [
+          (AnyChar,        _escape),
+        ]),
+
+      State('quoted',  [
+          (Str("\\")+Str("'"),  _literal_2nd),
+          (Str("'"),       Begin('')),
+          (AnyChar,        _literal)
+        ]),
+
+      State('quoted2',  [
+          (Str("\\")+Str('"'),  _literal_2nd),
+          (Str('"'),       Begin('')),
+          (AnyChar,        _literal)
+        ]),
+
+      State('repeater',  [
+          (Str('}'),       Begin('')),
+          (Rep1(digit),    _repeater)
+        ]),
+
+      State('set',  [
+          (Str("\\")+Any('[]'),  _add_set_2nd),
+          (Str(']'),       _end_set),
+          (AnyChar,        _add_set)
+        ]),
+
+      (Str('\\'),          Begin('escape')),
+      (Str("'"),           Begin('quoted')),
+      (Str('"'),           Begin('quoted2')),
+      (Str('{'),           Begin('repeater')),
+      (Str('['),           _begin_set),
+      (Str(' '),           Literal),
+      (Any('+.,'),        _check_single),
+      (Any('_?AaLlCc'),    TextToken),
+      (Any('MDYyHISPp:/'), DateToken),
+      (Any('#0'),          NumberToken),
+      (Any('<>'),          CaseModifier)
+  ]
+
+
+  # Lexicon used by input masks
+  _extra_lexicon = [
+        (Any('!'),        _check_single),
+  ]
+
+  def __process(self, token):
+    if isinstance(token,Repeater):
+      for i in range(0, token.count-1):
+        self.__process(self.__last)
+    elif isinstance(token, CaseModifier):
+      self.__modify.append(token)
+    else:
+      if self.__modify and isinstance(token, TextToken):
+        mod = self.__modify.pop(0)
+        if mod.token == '<':
+          token.force_upper = True
+        elif mod.token == '>':
+          token.force_lower = True
+      self.tokens.append(token)
+
+    self.__last = token
+
+  def __init__(self, file, name, numeric=False, date=False):
+
+    self.__singles = []
+    self.tokens = []
+    self.__last = None
+    self.__modify = []
+
+    try:
+      Scanner.__init__(self, Lexicon(self._lexicon + self._extra_lexicon), file, name)
+
+      while True:
+        token, extra = self.read()
+        if token is None:
+          break
+        self.__process(token)
+    except Errors.PlexError, msg:
+      raise MaskDefinitionError, msg
+
+    if self.__modify:
+      print "WARNING: Modifier found at end of mask."
+
+    # If any two of these are non-zero, then the
+    # mask is a text mask, not date or numeric.
+    num_markers = 0
+    date_markers = 0
+    text_markers = 0
+    rtl_pos = -1
+
+    i = 0
+    for token in self.tokens:
+      if isinstance(token,RightToLeft):
+        rtl_pos = i
+      if not isinstance(token, Literal):
+        if token.numeric:
+          num_markers += 1
+        elif token.date:
+          date_markers += 1
+        else:
+          text_markers += 1
+      i += 1
+
+    if not (num_markers or date_markers or text_markers):
+      raise MaskDefinitionError, 'Mask has no character tokens'
+
+    if numeric and (date_markers or text_markers):
+      raise MaskDefinitionError, 'Numeric mask has non-numeric tokens'
+
+    if date and (num_markers or text_markers):
+      raise MaskDefinitionError, 'Date/Time mask has non-date tokens'
+
+    # Remove the "!" (right-to-left) placeholder token, remembering its position
+    if rtl_pos >= 0:
+      self.tokens.pop(rtl_pos)
+    else:
+      rtl_pos = 0
+
+    self.rtl_pos = rtl_pos
+
+    if (num_markers and date_markers) or text_markers:
+      self.type = 'text'
+    elif num_markers:
+      self.type = 'numeric'
+    else:
+      self.type = 'date'
+

Added: trunk/gnue-common/src/formatting/masks/Masks.py
===================================================================
--- trunk/gnue-common/src/formatting/masks/Masks.py     2004-04-16 23:41:18 UTC (rev 5734)
+++ trunk/gnue-common/src/formatting/masks/Masks.py     2004-04-16 23:42:52 UTC (rev 5735)
@@ -0,0 +1,72 @@
+#
+# This file is part of GNU Enterprise.
+#
+# GNU Enterprise is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2, or (at your option) any later version.
+#
+# GNU Enterprise is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this program; see the file COPYING. If not,
+# write to the Free Software Foundation, Inc., 59 Temple Place
+# - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright 2001-2004 Free Software Foundation
+#
+# FILE:
+# Masks.py
+#
+# DESCRIPTION:
+"""
+Input masks for GNUe Forms, et al
+Based on lex/yacc parsing (via Plex)
+
+Programs should use InputMask() or FormatMask()
+to create masks.  Then, the internal library manager
+will reuse existing masks if possible.
+"""
+# NOTES:
+#
+
+__all__ = ['InputMask', 'FormatMask']
+
+from InputMask import InputMask as _InputMask
+
+class MaskLibrary:
+  """
+  Convenience class that keeps track of any existing mask definitions,
+  so we don't have to duplicate mask instances if we've already
+  encountered one before.
+  """
+  def __init__(self):
+    self._inputMaskMap = {}
+    self._maskMap = {}
+
+  def getInputMask(self, mask, numeric=False, date=False):
+    key = (numeric, date, mask)
+    try:
+      handler = self._inputMaskMap[key]
+      gDebug(5,'Reusing existing mask for %s' % mask)
+    except KeyError:
+      gDebug(5,'Creating mask handler for %s' % mask)
+      handler = self._inputMaskMap[key] = _InputMask(mask, numeric, date)
+    return handler
+
+  def getFormatMask(self, mask):
+    try:
+      handler = self._inputMaskMap[mask]
+      gDebug(5,'Reusing existing mask for %s' % mask)
+    except KeyError:
+      gDebug(5,'Creating mask handler for %s' % mask)
+      handler = self._inputMaskMap[mask] = None  #MaskParser(mask, numeric, date)
+    return handler
+
+
+library = MaskLibrary()
+InputMask = library.getInputMask
+FormatMask = library.getFormatMask
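
A sketch of the intended entry point (the mask string is illustrative, and the
gDebug() calls above assume the GNUe builtins are already installed, as when the
framework has been initialized):

import gnue              # as in test.py below
from Masks import InputMask, FormatMask

a = InputMask('M/D/Y')
b = InputMask('M/D/Y')   # same (numeric, date, mask) key, so the handler is reused
assert a is b

FormatMask() goes through the same cache, but in this revision its handler is still a
None placeholder.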

Added: trunk/gnue-common/src/formatting/masks/Tokens.py
===================================================================
--- trunk/gnue-common/src/formatting/masks/Tokens.py    2004-04-16 23:41:18 UTC (rev 5734)
+++ trunk/gnue-common/src/formatting/masks/Tokens.py    2004-04-16 23:42:52 UTC (rev 5735)
@@ -0,0 +1,449 @@
+#
+# This file is part of GNU Enterprise.
+#
+# GNU Enterprise is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2, or (at your option) any later version.
+#
+# GNU Enterprise is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this program; see the file COPYING. If not,
+# write to the Free Software Foundation, Inc., 59 Temple Place
+# - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright 2001-2004 Free Software Foundation
+#
+# FILE:
+# Tokens.py
+#
+# DESCRIPTION:
+"""
+Input masks for GNUe Forms, et al
+Based on lex/yacc parsing (via Plex)
+"""
+# NOTES:
+#
+
+from gnue.common.external.plex import *
+
+import string
+import locale
+
+FORCE_UPPER = 1
+FORCE_LOWER = 0
+
+digit = Any(string.digits)
+letter = Any(string.letters)
+
+######################################################################
+#
+# Base tokens
+#
+class Tok:
+  """
+  Base token
+  """
+
+  # True if this character is optional
+  optional = False
+
+  # If set, the character to auto-fill the string with
+  autochar = None
+
+  # If set, the mask char to fill the display with
+  # (Note: autochar takes precedence; default is _)
+  maskchar = None
+
+  # A list of partial grammar rules to
+  # build our character-at-a-time parser
+  # This list should itself contain lists
+  paths = []
+
+  # Number of characters this space takes up
+  maxchars = 1
+
+  # Left pad with zeros
+  # (only makes sense if maxchars > 1)
+  zero_pad = False
+
+  # As implied...
+  force_upper = False
+  force_lower = False
+
+  def __repr__(self):
+    return "%s(%s)" % (self.__class__.__name__, self.symbol)
+
+  def getProperDisplay(self, text):
+    if self.zero_pad and self.maxchars == 2 and len(text) == 1:
+      return "0" + text
+    else:
+      return text
+
+class TextTok(Tok):
+  """
+  Base text token
+  """
+
+class DateTok(Tok):
+  """
+  Base date token
+  """
+
+class NumTok(Tok):
+  """
+  Base numeric token
+  """
+
+class LitTok(Tok):
+  """
+  Base literal token
+  """
+  optional = True
+  def __repr__(self):
+    return "%s(%s)" % (self.__class__.__name__, self.autochar)
+
+
+class forcible:
+  """
+  This is a placeholder for the paths=[]
+  that denotes when a mask can be considered complete
+  *if* a forced marker is provided.
+  """
+
+######################################################################
+#
+# Base tokens
+#
+class tChar(TextTok):
+  """
+  Any character, required
+  """
+  symbol='_'
+  paths=[[Any(string.letters+string.digits+' '+string.punctuation)]]
+
+class tCharOpt(tChar):
+  """
+  Any character, optional
+  """
+  symbol='?'
+  optional=True
+
+class tA(TextTok):
+  """
+  Any alphanumeric, required
+  """
+  symbol='A'
+  paths=[[Any(string.letters+string.digits)]]
+
+class ta(tA):
+  """
+  Any alphanumeric, optional
+  """
+  symbol='a'
+  optional=True
+
+class tL(TextTok):
+  """
+  Any letter, required
+  """
+  symbol='L'
+  paths=[[letter]]
+
+class tl(tL):
+  """
+  Any letter, optional
+  """
+  symbol='l'
+  optional=True
+
+class tC(TextTok):
+  """
+  Any character (alphanum) or space, required
+  """
+  symbol='C'
+  paths=[[Any(string.letters+string.digits+' ')]]
+
+class tc(tC):
+  """
+  Any character (alphanum) or space, optional
+  """
+  symbol='c'
+  optional=True
+
+class tsign(NumTok):
+  """
+  Positive or negative sign (one per mask) (literal)
+  """
+  symbol='-'
+  optional=True
+  paths=[[Any('+-')]]
+
+class tDigit(NumTok):
+  """
+  Any digit, required
+  """
+  symbol='0'
+  paths=[[digit]]
+#  optional=True  # For input masks, this is largely true?
+
+class tDigitOpt(tDigit):
+  """
+  Any digit, optional
+  """
+  symbol='#'
+  optional=True
+
+class tM(DateTok):
+  """
+  Month, with zero padding
+  """
+  symbol='M'
+  maxchars = 2
+  zero_pad = True
+  paths=[ [ Str('1'), forcible, Any('012') ],
+          [ Str('0'), Range('19') ],
+          [ Range('29') ] ]
+
+class tm(tM):
+  """
+  Month, no zero padding
+  """
+  symbol = 'm'
+
+class tD(DateTok):
+  """
+  Day
+  """
+  symbol='D'
+  zero_pad = True
+  maxchars = 2
+  paths=[ [ Str('3'), forcible, Any('01') ],
+          [ Any('12'), forcible, digit ],
+          [ Str('0'), Range('19')],
+          [ Range('49') ] ]
+
+class td(tD):
+  """
+  Day, no zero padding
+  """
+  symbol = 'd'
+
+class tY(DateTok):
+  """
+  Year - 4 digits
+  """
+  symbol='Y'
+  maxchars = 4
+  paths = [ [ digit ]*4 ]
+
+class ty(DateTok):
+  """
+  Year - 2 digits
+  """
+  symbol='y'
+  maxchars = 2
+  paths = [ [ digit ]*2 ]
+
+class tH(DateTok):
+  """
+  Hour
+  """
+  symbol='H'
+  maxchars = 2
+  paths = [ [ Str('2'),forcible, Any('0123') ],
+            [ Any('01'), forcible, digit ],
+            [ Range('39') ] ]
+
+class tI(DateTok):
+  """
+  Minute
+  """
+  symbol='I'
+  maxchars = 2
+  paths = [ [Any('012345'), digit ] ]
+
+class tS(DateTok):
+  """
+  Seconds
+  """
+  symbol='S'
+  maxchars = 2
+  paths = [ [ Any('012345'), digit ] ]
+
+class tP(DateTok):
+  """
+  PM AM token
+  """
+  symbol='P'
+  maxchars = 2
+  paths = [ [ NoCase(Str('p','a')), NoCase(Str('m')) ] ]
+  force_upper = True
+
+class tp(tP):
+  """
+  pm am token
+  """
+  symbol='p'
+  maxchars = 2
+  force_lower = True
+
+class tLiteral(LitTok):
+  def __init__(self, char):
+    path = []
+    for ch in char:
+      path.append(Str(ch))
+    if len(char) == 1:
+      self.symbol = "\\%s" % char
+    else:
+      self.symbol = '"' + char.replace('\\','\\\\').replace('"','\\"') + '"'
+    self.paths = [path]
+    self.autochar = char
+
+class tDecSep(LitTok):
+  """
+  Decimal separator
+  """
+  symbol='.'
+  autochar=locale.localeconv()['decimal_point'] or '.'
+  paths = [[Str(autochar)]]
+
+class tThouSep(LitTok):
+  """
+  Thousands separator
+  """
+  symbol=','
+  autochar=locale.localeconv()['thousands_sep'] or ','
+  paths = [[Str(autochar)]]
+
+class tTimeSep(LitTok):
+  """
+  Time Separator
+  """
+  symbol=':'
+  autochar=':'  # TODO: *Where* is this in locale?!?!?
+  paths=[[Str(autochar)]]
+
+
+class tDateSep(LitTok):
+  """
+  Date Separator
+  """
+  symbol='/'
+  autochar='/' # TODO: *Where* is this in locale?!?!?
+  paths=[[Str(autochar)]]
+
+class tCustom(TextTok):
+  """
+  Custom text token
+  (Passed in a set of valid characters)
+  """
+  def __init__(self, chars):
+    self.paths = [[Any(chars)]]
+
+class tCustomNum(NumTok):
+  """
+  Custom numeric token
+  (Passed in a set of valid digits)
+  """
+  def __init__(self, chars):
+    self.paths = [[Any(chars)]]
+    self.symbol = '[%s]' % chars.replace('\\','\\\\').replace(']','\\]').replace('-','\\-')
+
+
+
+def buildSingleValidationRule(token, honorOptional=True):
+  """
+  Build a validation rule for a specific token
+  """
+  val = None
+  for ruleset in token.paths:
+    v2 = v3 = None
+    startoptional = False
+    for rule in ruleset:
+      if rule == forcible:
+        startoptional = True
+        continue
+      if startoptional:
+        if v3 is None:
+          v3 = rule
+        else:
+          v3 = v3 + rule
+      else:
+        if v2 is None:
+          v2 = rule
+        else:
+          v2 = v2 + rule
+    if v3 is not None:
+      v2 = v2 + v3
+    if val is None:
+      val = v2
+    else:
+      val = val | v2
+  if honorOptional and token.optional:
+    return Opt(val)
+  else:
+    return val
+
+
+def buildValidationRule(tokens):
+  """
+  Take a list of tokens and combine all their rule paths
+  into a single rule that validates whether a string is
+  "complete" wrt the input mask or not.
+  """
+  val = Bol
+  for token in tokens:
+    val = val + buildSingleValidationRule(token)
+  if not tokens:
+    val = val + Rep1(AnyChar)
+  return val + Eol
+
+
+tokenMap = {
+    # Input/output tokens
+    '_':  tChar,    # Any character, required
+    '?':  tCharOpt, # Any character, optional
+    'A':  tA,       # Any alphanumeric, required
+    'a':  ta,       # Any alphanumeric, optional
+    'L':  tL,       # Any letter, required
+    'l':  tl,       # Any letter, optional
+    'C':  tC,       # Any character (alphanum) or space, required
+    'c':  tc,       # Any character (alphanum) or space, optional
+    '+':  tsign,    # Positive or negative sign (one per mask)
+    '0':  tDigit,   # Any digit, required
+    '#':  tDigitOpt, # Any digit, optional
+    'M':  tM,       # Month, zero padding
+    'D':  tD,       # Day, zero padding
+    'Y':  tY,       # Year - 4 digits
+    'y':  ty,       # Year - 2 digits
+    'H':  tH,       # Hour
+    'I':  tI,       # Minute
+    'S':  tS,       # Seconds
+    'P':  tP,       # PM AM token
+    'p':  tp,       # pm am token
+    '.':  tDecSep,  # Decimal separator
+    ',':  tThouSep, # Thousands separator
+    ':':  tTimeSep, # Time Separator
+    '/':  tDateSep, # Date Separator
+    # Output-only
+    'm':  tm,       # Month, no zero padding
+    'd':  td,       # Day, no zero padding
+}
+
+
+###########
+def printLexiconTree(lexicon, indent=0):
+  for foo in lexicon:
+    if isinstance(foo, State):
+      print (" "*indent) + ("State: %s" % str((foo.name)))
+      printLexiconTree(foo.tokens,indent+2)
+    elif type(foo) == type(()) and len(foo) == 2:
+      print " "*indent + str(foo[0])
+    else:
+      print " "*indent + str(foo)
+

Added: trunk/gnue-common/src/formatting/masks/test.py
===================================================================
--- trunk/gnue-common/src/formatting/masks/test.py      2004-04-16 23:41:18 UTC (rev 5734)
+++ trunk/gnue-common/src/formatting/masks/test.py      2004-04-16 23:42:52 UTC (rev 5735)
@@ -0,0 +1,74 @@
+import gnue
+import string, locale
+from Masks import InputMask
+
+locale.setlocale(locale.LC_ALL,'')
+
+def formatOutput(output, cursor):
+  output += "'"
+  output = output[:cursor] + chr(27) + '[7m' + \
+           output[cursor:cursor+1] + chr(27) + '[0m' + output[cursor+1:]
+  return "'" + output
+
+
+m='"Date:" M/D/y'
+mask = InputMask(m)
+print "Mask: %s" % m
+mask.begin()
+for f in ('','1','12','123','1234','12345','9999'):
+  print string.ljust("Input: '%s'" % f, 18),
+  output, cursor = mask._parseInput(newtext='%s'%f)
+  print "Output: " + formatOutput(output, cursor)
+
+
+m='\\$###,##0!.00'
+mask = InputMask(m)
+print "Mask: %s" % m
+mask.begin()
+for f in ('','1','12','123','1234','12345','9999'):
+  print string.ljust("Input: '%s'" % f, 18),
+  output, cursor = mask._parseInput(newtext='%s'%f)
+  print "Output: " + formatOutput(output, cursor)
+
+
+raise SystemExit  # stop here; the cursor-movement walkthrough below is skipped for now
+# Commands:
+#  < Left arrow
+#  > right arrow
+#  { delete left
+#  } delete right
+#  ^ Home
+#  v End
+#    Anything else: As is
+t = "9311<<^v"
+
+output, cursor = mask.begin()
+print "Init: " + formatOutput(output, cursor)
+
+for c in t:
+  print "-----------"
+  if c == "<":
+    print string.ljust("Left.", 18),
+    output, cursor = mask.moveLeft()
+  elif c == '>':
+    print string.ljust("Right.", 18),
+    output, cursor = mask.moveRight()
+  elif c == '^':
+    print string.ljust("Home.", 18),
+    output, cursor = mask.moveHome()
+  elif c == 'v':
+    print string.ljust("End.", 18),
+    output, cursor = mask.moveEnd()
+  elif c == '{':
+    print string.ljust("Backspace.", 18),
+    output, cursor = mask.backspace()
+  elif c == '}':
+    print string.ljust("Delete.", 18),
+    output, cursor = mask.delete()
+  else:
+    print string.ljust("Type char %s" % c, 18),
+    output, cursor = mask.add(c)
+
+  print "Output: " + formatOutput(output, cursor)
+
+




