#@+leo-ver=5-thin
#@+node:ekr.20170302123956.1: * @file ../doc/leoAttic.txt
# This is Leo's final resting place for dead code.
# Much easier to access than a git repo.

#@@language python
#@@killbeautify
#@+all
#@+node:ekr.20170221033229.1: ** Unused files
@language python
#@+node:ekr.20150514040234.1: *3* COPY  ../commands/registerCommands.py
@first # -*- coding: utf-8 -*-
'''Leo's register commands.'''
import leo.core.leoGlobals as g
from leo.commands.baseCommands import BaseEditCommandsClass as BaseEditCommandsClass

def cmd(name):
    '''Command decorator for the RegisterCommandsClass class.'''
    ivars = ['c', 'registerCommands']
    return g.new_cmd_decorator(name, ivars)

@others
#@+node:ekr.20160514120143.1: *4* class RegisterCommandsClass
class RegisterCommandsClass(BaseEditCommandsClass):
    '''Create registers a-z and the corresponding Emacs commands.'''
    @others
#@+node:ekr.20150514063305.463: *5* register.ctor
def __init__(self, c):
    '''Ctor for RegisterCommandsClass class.'''
    # pylint: disable=super-init-not-called
    self.c = c
    # methodDict maps single-letter keys to bound command methods;
    # helpDict maps the same keys to help strings.
    self.methodDict, self.helpDict = self.addRegisterItems()
    # Init these here to keep pylint happy.
    self.method = None
    self.registerMode = 0 # Must be an int.
    # Registers are shared across all commanders via g.app.
    self.registers = g.app.globalRegisters
#@+node:ekr.20150514063305.465: *5* register.addRegisterItems
def addRegisterItems(self):
    '''Return (methodDict, helpDict) for the register commands.'''
    # NOTE: both dicts preserve their historical insertion order.
    method_pairs = (
        ('+', self.incrementRegister),
        (' ', self.pointToRegister),
        ('a', self.appendToRegister),
        ('i', self.insertRegister),
        ('j', self.jumpToRegister),
        # ('n', self.numberToRegister),  # Not ready yet.
        ('p', self.prependToRegister),
        ('r', self.copyRectangleToRegister),
        ('s', self.copyToRegister),
        ('v', self.viewRegister),
    )
    help_pairs = (
        ('s', 'copy to register'),
        ('i', 'insert from register'),
        ('+', 'increment register'),
        ('n', 'number to register'),
        ('p', 'prepend to register'),
        ('a', 'append to register'),
        (' ', 'point to register'),
        ('j', 'jump to register'),
        ('r', 'rectangle to register'),
        ('v', 'view register'),
    )
    return dict(method_pairs), dict(help_pairs)
#@+node:ekr.20150514063305.466: *5* register.checkBodySelection
def checkBodySelection(self, warning='No text selected'):
    '''Check that body text is selected, showing *warning* if not.
    Delegates to BaseEditCommandsClass._chckSel; presumably returns a
    bool — confirm against the base class.'''
    return self._chckSel(event=None, warning=warning)
#@+node:ekr.20150514063305.467: *5* register.Entries
#@+node:ekr.20150514063305.468: *6* appendToRegister
@cmd('register-append-to')
def appendToRegister(self, event):
    '''Prompt for a register name and append the selected text to the register's contents.'''
    c, k = self.c, self.c.k
    tag = 'append-to-register'
    state = k.getState(tag)
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = tag
        k.setLabelBlue('Append to Register: ')
        k.setState(tag, 1, self.appendToRegister)
    else:
        # Second call: char names the target register.
        k.clearState()
        if self.checkBodySelection():
            if char.isalpha():
                w = c.frame.body.wrapper
                c.bodyWantsFocus()
                key = char.lower()  # Registers are named a-z.
                val = self.registers.get(key, '')
                val = val + w.getSelectedText()
                self.registers[key] = val
                k.setLabelGrey('Register %s = %s' % (key, repr(val)))
            else:
                k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.469: *6* prependToRegister
@cmd('register-prepend-to')
def prependToRegister(self, event):
    '''Prompt for a register name and prepend the selected text to the register's contents.'''
    c, k = self.c, self.c.k
    tag = 'prepend-to-register'
    state = k.getState(tag)
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = tag
        k.setLabelBlue('Prepend to Register: ')
        k.setState(tag, 1, self.prependToRegister)
    else:
        # Second call: char names the target register.
        k.clearState()
        if self.checkBodySelection():
            if char.isalpha():
                w = c.frame.body.wrapper
                c.bodyWantsFocus()
                key = char.lower()  # Registers are named a-z.
                val = self.registers.get(key, '')
                # Prepend: selection goes *before* existing contents.
                val = w.getSelectedText() + val
                self.registers[key] = val
                k.setLabelGrey('Register %s = %s' % (key, repr(val)))
            else:
                k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.470: *6* copyRectangleToRegister
@cmd('register-copy-rectangle-to')
def copyRectangleToRegister(self, event):
    '''
    Prompt for a register name and append the rectangle defined by selected
    text to the register's contents.
    '''
    c, k = self.c, self.c.k
    state = k.getState('copy-rect-to-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: remember the widget, prompt, and wait for the name key.
        self.w = self.editWidget(event)
        if self.w:
            k.commandName = 'copy-rectangle-to-register'
            k.setLabelBlue('Copy Rectangle To Register: ')
            k.setState('copy-rect-to-reg', 1, self.copyRectangleToRegister)
    elif self.checkBodySelection('No rectangle selected'):
        # Second call: char names the target register.
        k.clearState()
        if char.isalpha():
            key = char.lower()
            w = self.w
            c.widgetWantsFocusNow(w)
            # r1.r2 .. r3.r4 are the (row, col) corners of the rectangle.
            r1, r2, r3, r4 = self.getRectanglePoints(w)
            rect = []
            # Collect one line of the rectangle per row.
            while r1 <= r3:
                txt = w.get('%s.%s' % (r1, r2), '%s.%s' % (r1, r4))
                rect.append(txt)
                r1 = r1 + 1
            # A rectangle register holds a *list*, not a string.
            self.registers[key] = rect
            k.setLabelGrey('Register %s = %s' % (key, repr(rect)))
        else:
            k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.471: *6* copyToRegister
@cmd('register-copy-to')
def copyToRegister(self, event):
    '''Prompt for a register name and copy the selected text into the register,
    replacing its previous contents.'''
    c, k = self.c, self.c.k
    tag = 'copy-to-register'
    state = k.getState(tag)
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = tag
        k.setLabelBlue('Copy to Register: ')
        k.setState(tag, 1, self.copyToRegister)
    else:
        # Second call: char names the target register.
        k.clearState()
        if self.checkBodySelection():
            if char.isalpha():
                key = char.lower()
                w = c.frame.body.wrapper
                c.bodyWantsFocus()
                # Unlike append/prepend, copy *overwrites* the register.
                val = w.getSelectedText()
                self.registers[key] = val
                k.setLabelGrey('Register %s = %s' % (key, repr(val)))
            else:
                k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.472: *6* incrementRegister
@cmd('register-increment')
def incrementRegister(self, event):
    '''Prompt for a register name and increment its value if it has a numeric value.'''
    c, k = self.c, self.c.k
    state = k.getState('increment-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.setLabelBlue('Increment register: ')
        k.setState('increment-reg', 1, self.incrementRegister)
    else:
        k.clearState()
        if self.checkIfRectangle(event):
            pass # Error message is in the label.
        elif char.isalpha():
            key = char.lower()
            # Missing registers increment from 0.
            val = self.registers.get(key, 0)
            try:
                # Registers hold strings: convert, add one, store back as str.
                val = str(int(val) + 1)
                self.registers[key] = val
                k.setLabelGrey('Register %s = %s' % (key, repr(val)))
            except ValueError:
                # Non-numeric contents: report, leave register unchanged.
                k.setLabelGrey("Can't increment register %s = %s" % (key, val))
        else:
            k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.473: *6* insertRegister
@cmd('register-insert')
def insertRegister(self, event):
    '''Prompt for a register name and insert the register's contents into the body.'''
    c, k = self.c, self.c.k
    state = k.getState('insert-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = 'insert-register'
        k.setLabelBlue('Insert register: ')
        k.setState('insert-reg', 1, self.insertRegister)
    else:
        # Second call: char names the source register.
        k.clearState()
        if char.isalpha():
            w = c.frame.body.wrapper
            c.bodyWantsFocus()
            key = char.lower()
            val = self.registers.get(key)
            if val:
                if isinstance(val, list):
                    # A list means the register holds a rectangle.
                    c.rectangleCommands.yankRectangle(val)
                else:
                    i = w.getInsertPoint()
                    w.insert(i, val)
                k.setLabelGrey('Inserted register %s' % key)
            else:
                k.setLabelGrey('Register %s is empty' % key)
        else:
            k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.474: *6* jumpToRegister
@cmd('register-jump-to')
def jumpToRegister(self, event):
    '''Prompt for a register name and set the insert point to the value in its register.'''
    c, k = self.c, self.c.k
    state = k.getState('jump-to-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.setLabelBlue('Jump to register: ')
        k.setState('jump-to-reg', 1, self.jumpToRegister)
    else:
        k.clearState()
        if char.isalpha():
            # Rectangles (lists) are not locations; error is in the label.
            if self.checkIfRectangle(event): return
            key = char.lower()
            val = self.registers.get(key)
            w = c.frame.body.wrapper
            c.bodyWantsFocus()
            if val:
                try:
                    # val is expected to be an insert-point value saved by
                    # pointToRegister; anything else raises here.
                    w.setInsertPoint(val)
                    k.setLabelGrey('At %s' % repr(val))
                except Exception:
                    k.setLabelGrey('Register %s is not a valid location' % key)
            else:
                k.setLabelGrey('Register %s is empty' % key)
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.475: *6* numberToRegister (not used)
@
C-u number C-x r n reg
    Store number into register reg (number-to-register).
C-u number C-x r + reg
    Increment the number in register reg by number (increment-register).
C-x r g reg
    Insert the number from register reg into the buffer.
@c

def numberToRegister(self, event):
    '''Stub for the Emacs number-to-register command (not enabled; see the
    commented-out 'n' entry in addRegisterItems).'''
    k = self.c.k
    state = k.getState('number-to-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = 'number-to-register'
        k.setLabelBlue('Number to register: ')
        k.setState('number-to-reg', 1, self.numberToRegister)
    else:
        k.clearState()
        if char.isalpha():
            # self.registers[char.lower()] = str(0)
            k.setLabelGrey('number-to-register not ready yet.')
        else:
            k.setLabelGrey('Register must be a letter')
#@+node:ekr.20150514063305.476: *6* pointToRegister
@cmd('register-point-to')
def pointToRegister(self, event):
    '''Prompt for a register name and put a value indicating the insert point in the register.'''
    c, k = self.c, self.c.k
    state = k.getState('point-to-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = 'point-to-register'
        k.setLabelBlue('Point to register: ')
        k.setState('point-to-reg', 1, self.pointToRegister)
    else:
        # Second call: char names the target register.
        k.clearState()
        if char.isalpha():
            w = c.frame.body.wrapper
            c.bodyWantsFocus()
            key = char.lower()
            # Store the current insert point; jumpToRegister consumes it.
            val = w.getInsertPoint()
            self.registers[key] = val
            k.setLabelGrey('Register %s = %s' % (key, repr(val)))
        else:
            k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514063305.477: *6* viewRegister
@cmd('register-view')
def viewRegister(self, event):
    '''Prompt for a register name and show its contents in the minibuffer label.'''
    c, k = self.c, self.c.k
    state = k.getState('view-reg')
    char = event and event.char or ''
    if state == 0:
        # First call: prompt and wait for the register-name keystroke.
        k.commandName = 'view-register'
        k.setLabelBlue('View register: ')
        k.setState('view-reg', 1, self.viewRegister)
    else:
        # Second call: char names the register to display.
        k.clearState()
        if char.isalpha():
            key = char.lower()
            val = self.registers.get(key)
            k.setLabelGrey('Register %s = %s' % (key, repr(val)))
        else:
            k.setLabelGrey('Register must be a letter')
    c.bodyWantsFocus()
#@+node:ekr.20150514043714.12: *5* register.checkIfRectangle
def checkIfRectangle(self, event):
    '''Return True (after clearing key state and showing an error) if the
    register named by event.char holds a rectangle (a list) rather than text.'''
    k = self.c.k
    key = event and event.char.lower() or ''
    if not isinstance(self.registers.get(key), list):
        return False
    k.clearState()
    k.setLabelGrey("Register contains Rectangle, not text")
    return True
#@+node:ekr.20150514040144.1: *3* COPY ../commands/macroCommands.py
@first # -*- coding: utf-8 -*-
'''Leo's macros commands.'''
<< imports >>

def cmd(name):
    '''Command decorator for the MacroCommandsClass class.'''
    ivars = ['c', 'macroCommands']
    return g.new_cmd_decorator(name, ivars)

@others
#@+node:ekr.20150514050425.1: *4* << imports >> (macroCommands.py)
import leo.core.leoGlobals as g
from leo.commands.baseCommands import BaseEditCommandsClass as BaseEditCommandsClass
#@+node:ekr.20160514120837.1: *4* class MacroCommandsClass
class MacroCommandsClass(BaseEditCommandsClass):
    '''Records, plays, saves and restores keyboard macros.'''
    @others
#@+node:ekr.20150514063305.432: *5* macro.ctor
def __init__(self, c):
    '''Ctor for MacroCommandsClass class.'''
    # pylint: disable=super-init-not-called
    self.c = c
    # The most recently completed (unnamed) macro: a list of key events.
    self.lastMacro = None
    # All completed macros, most recent first.
    self.macros = []
    # The macro currently being recorded.
    self.macro = []
    # Keys are macro names, values are lists of key events.
    self.namedMacros = {}
    # Important: we must not interfere with k.state in startRecordingMacro!
    self.recordingMacro = False
#@+node:ekr.20150514063305.434: *5* callLastMacro
# Called from universal-command.

@cmd('macro-call-last')
def callLastMacro(self, event=None):
    '''Call the last recorded keyboard macro.'''
    # Also called from universal-command.
    macro = self.lastMacro
    if macro:
        self.executeMacro(macro)
#@+node:ekr.20150514063305.435: *5* callNamedMacro
@cmd('macro-call')
def callNamedMacro(self, event):
    '''Prompts for a macro name, then executes it.'''
    k = self.c.k
    k.setLabelBlue('Call macro named: ')
    # k.get1Arg collects the name, then calls the handler below with k.arg set.
    k.get1Arg(event, handler=self.callNamedMacro1)

def callNamedMacro1(self, event):
    '''Second phase of callNamedMacro: k.arg holds the macro name.'''
    k = self.c.k
    macro = self.namedMacros.get(k.arg)
    # Must do this first!
    k.clearState()
    if macro:
        self.executeMacro(macro)
    else:
        g.es('no macro named %s' % k.arg)
        k.resetLabel()
#@+node:ekr.20150514063305.436: *5* completeMacroDef
def completeMacroDef(self, name, macro):
    '''
    Add the macro to the list of macros, and add the macro's name to
    c.commandsDict.

    name: the command name to bind.
    macro: a list of key events to replay.
    '''
    # Called from loadFile and nameLastMacro.
    trace = False and not g.unitTesting
    c = self.c
    if trace:
        g.trace('macro::%s' % (name))
        for event in macro:
            g.trace(event.stroke)

    # Closure binding *this* macro; becomes the command's implementation.
    def func(event, macro=macro):
        return self.executeMacro(macro)

    if name in c.commandsDict:
        g.es_print('over-riding command: %s' % (name))
    else:
        g.es_print('loaded: %s' % (name))
    c.commandsDict[name] = func
    self.namedMacros[name] = macro
#@+node:ekr.20150514063305.437: *5* endMacro
@cmd('macro-end-recording')
def endMacro(self, event=None):
    '''Stops recording a macro.'''
    k = self.c.k
    self.recordingMacro = False
        # Tell k.masterKeyHandler and k.masterCommandHandler we are done.
    if self.macro:
        # Save the finished macro (most recent first) and reset the buffer.
        # self.macro = self.macro [: -4]
        self.macros.insert(0, self.macro)
        self.lastMacro = self.macro[:]
        self.macro = []
        k.setLabelBlue('Keyboard macro defined, not named')
        # g.es('Keyboard macro defined, not named')
    else:
        k.setLabelBlue('Empty keyboard macro')
        # g.es('Empty keyboard macro')
#@+node:ekr.20150514063305.438: *5* executeMacro
def executeMacro(self, macro):
    '''Replay *macro* (a list of key events) through the master key handler.'''
    trace = False and not g.unitTesting
    c, k = self.c, self.c.k
    c.bodyWantsFocus()
    for event in macro:
        if trace: g.trace(repr(event))
        k.masterKeyHandler(event)
#@+node:ekr.20150514063305.439: *5* getMacrosNode
def getMacrosNode(self):
    '''Return the position of the @macros node, creating it if necessary.'''
    c = self.c
    # Prefer an existing @macros node anywhere in the outline.
    for p in c.all_unique_positions():
        if p.h == '@macros':
            return p
    # Not found.
    for p in c.all_unique_positions():
        if p.h == '@settings':
            # Create as the last child of the @settings node.
            p2 = p.insertAsLastChild()
            break
    else:
        # No @settings node either: create as the root node.
        oldRoot = c.rootPosition()
        p2 = oldRoot.insertAfter()
        p2.moveToRoot(oldRoot)
    c.setHeadString(p2, '@macros')
    g.es_print('Created: %s' % p2.h)
    c.redraw()
    return p2
#@+node:ekr.20150514063305.440: *5* getWidgetName
def getWidgetName(self, obj):
    '''Return obj's Qt object name, looking through a .widget wrapper if
    needed. Return '' when no name is available.'''
    if obj:
        if hasattr(obj, 'objectName'):
            return obj.objectName()
        inner = getattr(obj, 'widget', None)
        if hasattr(inner, 'objectName'):
            return inner.objectName()
    return ''
#@+node:ekr.20150514063305.441: *5* loadMacros
@cmd('macro-load-all')
def loadMacros(self, event=None):
    '''Load macros from the @macros node.

    Format (written by saveMacros): a ``::name::`` header line, followed by
    one stroke per line, terminated by a blank line.
    '''
    trace = False and not g.unitTesting
    c = self.c
    create_event = g.app.gui.create_key_event
    p = self.getMacrosNode()

    def oops(message):
        # Report a malformed line; parsing continues.
        g.trace(message)

    lines = g.splitLines(p.b)
    i = 0
    macro = []; name = None
    while i < len(lines):
        progress = i
        s = lines[i].strip()
        i += 1
        if s.startswith('::') and s.endswith('::'):
            # Header line: ::name:: starts a new macro definition.
            name = s[2: -2]
            if name:
                macro = []
                # Consume stroke lines until a blank line or end of body.
                while i < len(lines):
                    s = lines[i].strip()
                    if trace: g.trace(repr(name), repr(s))
                    if s:
                        stroke = s
                        char = c.k.stroke2char(stroke)
                        w = c.frame.body.wrapper
                        macro.append(create_event(c, char, stroke, w))
                        i += 1
                    else: break
                # Create the entries.
                if macro:
                    self.completeMacroDef(name, macro)
                    macro = []; name = None
                else:
                    oops('empty expansion for %s' % (name))
        elif s:
            if s.startswith('#') or s.startswith('@'):
                # Comments and Leo directives are silently allowed.
                pass
            else:
                oops('ignoring line: %s' % (repr(s)))
        else: pass
        # Loop-progress sanity check: i always advances.
        assert progress < i
    # Finish off the last macro (body may not end with a blank line).
    if macro:
        self.completeMacroDef(name, macro)
#@+node:ekr.20150514063305.442: *5* nameLastMacro
@cmd('macro-name-last')
def nameLastMacro(self, event):
    '''Prompts for the name to be given to the last recorded macro.'''
    k = self.c.k
    k.setLabelBlue('Name of macro: ')
    # k.get1Arg collects the name, then calls the handler below with k.arg set.
    k.get1Arg(event, handler=self.nameLastMacro1)

def nameLastMacro1(self, event):
    '''Second phase of nameLastMacro: k.arg holds the chosen name.'''
    k = self.c.k
    k.clearState()
    name = k.arg
    # NOTE(review): self.lastMacro may be None if no macro was recorded —
    # completeMacroDef would then register an empty command. Verify upstream.
    self.completeMacroDef(name, self.lastMacro)
    k.setLabelGrey('Macro defined: %s' % name)

#@+node:ekr.20150514063305.443: *5* printMacros & printLastMacro
@cmd('macro-print-all')
def printMacros(self, event=None):
    '''Prints the name and definition of all named macros.'''
    names = list(self.namedMacros.keys())
    if names:
        names.sort()
        print('macros', names)
        # g.es('\n'.join(names),tabName='Macros')
    else:
        g.es('no macros')

@cmd('macro-print-last')
def printLastMacro(self, event=None):
    '''Print the strokes of the last (unnamed) macro.'''
    if self.lastMacro:
        for event in self.lastMacro:
            g.es(repr(event.stroke))
#@+node:ekr.20150514063305.444: *5* saveMacros
@cmd('macro-save-all')
def saveMacros(self, event=None):
    '''Store all named macros in the @macros node, in the format that
    loadMacros reads back.'''
    p = self.getMacrosNode()
    result = []
    # g.trace(list(self.namedMacros.keys()))
    for name in self.namedMacros:
        macro = self.namedMacros.get(name)
        # ::name:: header, then one stroke per line.
        result.append('::%s::' % (name))
        for event in macro:
            if 0: # Disabled verbose format.
                w_name = self.getWidgetName(event.w)
                result.append('%s::%s::%s' % (repr(event.char), event.stroke, w_name))
            result.append(event.stroke)
        result.append('') # Blank line terminates
    p.b = '\n'.join(result)
#@+node:ekr.20150514063305.445: *5* startRecordingMacro
@cmd('macro-start-recording')
def startRecordingMacro(self, event):
    '''Start recording or continue to record a macro.

    Called once to begin recording, then again for every subsequent
    keystroke while self.recordingMacro is True.
    '''
    trace = False and not g.unitTesting
    k = self.c.k
    if event:
        if self.recordingMacro:
            # Already recording: append this keystroke to the macro.
            if trace: g.trace('stroke', event.stroke)
            self.macro.append(event)
        else:
            self.recordingMacro = True
            k.setLabelBlue('Recording macro. ctrl-g to end...', protect=True)
            # g.es('Recording macro. ctrl-g to end...')
    else:
        g.trace('can not happen: no event')
#@+node:ekr.20170123092100.1: *3* COPY ../plugins/qt_big_text.py
"""Leo aware Qt Dialog for delaying loading of big text"""
import leo.core.leoGlobals as g
from leo.core.leoQt import QtWidgets
import leo.plugins.qt_text as qt_text
@others
@language python
@tabwidth -4
@pagewidth 70
#@+node:ekr.20170123092100.2: *4* class BigTextController
class BigTextController(object):
    @others
#@+node:ekr.20170123092100.3: *5* btc.__init__
def __init__(self, c):
    '''Ctor for BigTextController.'''
    self.active_flag = None # True: warning text/buttons are visible.
    self.c = c
    self.inhibit = set() # Set of inhibited vnodes.
    self.layout = None # The parent widget's QLayout (set in add_buttons).
    self.old_p = None # Previously selected position.
    self.old_w = None # A LeoQTextBrowser.
    self.p = None # The position whose body is too big.
    self.parent = None # The body widget's parent QWidget.
    self.s = None # The big body text (p.b).
    self.w = None # The warning/buttons QWidget created by create_widgets.
#@+node:ekr.20170123092100.4: *5* btc.add_buttons
def add_buttons(self, old_p, p):
    '''Init the big text controller for node p and create the warning
    widgets, unless p's vnode has been inhibited by the user.'''
    c = self.c
    w = c.frame.body.wrapper.widget
    parent = w.parent() # A QWidget
    layout = parent.layout()
    # Set ivars
    self.active_flag = True
    self.layout = layout
    self.old_p = old_p
    self.old_w = w # A LeoQTextBrowser.
    self.p = p
    self.parent = parent
    self.s = p.b
    self.widgets = {}
        # Keys are strings, values are buttons.
    if p.v not in self.inhibit:
        self.create_widgets()
            # Create the big-text widgets.
    # g.trace('----- (LeoBigTextDialog)',len(self.s),self.w)
#@+node:ekr.20170123092100.5: *5* btc.create_widgets
def create_widgets(self):
    '''Create the big-text buttons and text warning area.'''
    c = self.c
    self.active_flag = True
    warning = self.warning_message()
    if 1: # essential: load the full text despite the size warning.
        self.old_w.setPlainText(self.p.b)
    else: # can lose data: show a placeholder instead of the real body.
        self.old_w.setPlainText(
            '@nocolor-node\n\nBig text not loaded: %s characters. Limit is %s' % (
            len(self.p.b), c.max_pre_loaded_body_chars))
    self.w = w = QtWidgets.QWidget() # No parent needed.
    layout = QtWidgets.QVBoxLayout() # No parent needed.
    w.setLayout(layout)
    # The warning text area, above the buttons.
    w.text = tw = QtWidgets.QTextBrowser()
    tw.setText(warning)
    tw.setObjectName('bigtextwarning')
    self.widgets['bigtextwarning'] = tw
    layout.addWidget(tw)
    # (key, button label, callback) for each button.
    table = [
            ('remove', 'Remove These Buttons', self.remove),
            ('load_nc', 'Load Text With @killcolor', self.load_nc),
            ('more', 'Double limit for this session', self.more),
            ('copy', 'Copy body to clipboard', self.copy),
    ]
    if self.s.startswith('@killcolor'):
        # @killcolor is already in effect: the load_nc button is pointless.
        del table[1]
    for key, label, func in table:
        self.widgets[key] = button = QtWidgets.QPushButton(label)
        layout.addWidget(button)

        # func=func binds each button to its own callback (late-binding fix).
        def button_callback(checked, func=func):
            func()

        button.clicked.connect(button_callback)
    # layout.addItem(QtWidgets.QSpacerItem(
        # 10, 10, vPolicy=QtWidgets.QSizePolicy.Expanding))
    self.layout.addWidget(w)
    w.show()
#@+node:ekr.20170123092100.6: *5* btc.copy
def copy(self):
    '''Copy self.s (c.p.b) to the clipboard.'''
    g.app.gui.replaceClipboardWith(self.s)
#@+node:ekr.20170123092100.7: *5* btc.go_away
def go_away(self):
    '''Remove the warning widget (all buttons and text) and deactivate self.'''
    # g.trace(self.w or 'None')
    self.active_flag = False
    c = self.c
    if self.w:
        # Does not work.
        # self.old_w.setPlainText(self.p.b)
        self.layout.removeWidget(self.w)
        # deleteLater: let Qt destroy the widget after the current event.
        self.w.deleteLater()
        self.w = None
    c.bodyWantsFocusNow()
#@+node:ekr.20170123092100.8: *5* btc.is_qt_body
def is_qt_body(self):
    '''Return True if the body widget is a LeoQTextBrowser.

    c.frame.body.wrapper is a QTextEditWrapper or QScintillaWrapper;
    its .widget is the actual Qt widget.
    '''
    widget = self.c.frame.body.wrapper.widget
    return isinstance(widget, qt_text.LeoQTextBrowser)
#@+node:ekr.20170123092100.9: *5* btc.is_big_text
def is_big_text(self, p):
    '''True if p.b exceeds the configured limit and a body widget exists.
    A limit of 0 (or less) disables the big-text machinery entirely.'''
    c = self.c
    limit = c.max_pre_loaded_body_chars
    if limit <= 0:
        return False
    wrapper = c.frame.body.wrapper
    w = wrapper and wrapper.widget
    return w and len(p.b) > limit
#@+node:ekr.20170123092100.10: *5* btc.load_nc
def load_nc(self):
    '''Load the big text with a leading @killcolor directive.'''
    c, p = self.c, self.c.p
    if not c.positionExists(p):
        return
    self.wait_message()
    # Recreate the entire select code.
    tag = "@killcolor\n"
    # Prepend @killcolor so the colorizer skips this (big) body.
    if not p.b.startswith(tag):
        p.b = tag + p.b
    w = self.c.frame.body.wrapper
    # Remove the warning widgets before touching the insert point.
    self.go_away()
    w.setInsertPoint(0)
    w.seeInsertPoint()
    c.bodyWantsFocusNow()
    c.recolor_now()
#@+node:ekr.20170123092100.11: *5* btc.more
def more(self):
    '''
    Double the big text limit for this session.
    Load the text if the text is less than this limit.
    '''
    c = self.c
    c.max_pre_loaded_body_chars *= 2
    if len(c.p.b) < c.max_pre_loaded_body_chars:
        # The body now fits: load it and stop warning about this vnode.
        self.wait_message()
        self.inhibit.add(c.p.v)
        self.go_away()
        c.selectPosition(self.p)
    else:
        # Still too big: just refresh the warning text with the new limit.
        tw = self.widgets.get('bigtextwarning')
        tw.setText(self.warning_message())
        g.es('limit is now: %s' % c.max_pre_loaded_body_chars)
#@+node:ekr.20170123092100.12: *5* btc.remove
def remove(self):
    '''Remove the buttons and inhibit them hereafter for this vnode.'''
    self.inhibit.add(self.c.p.v)
    self.go_away()
#@+node:ekr.20170123092100.13: *5* btc.should_add_buttons
def should_add_buttons(self, old_p, p):
    '''Return True if big-text buttons should be added for p.'''
    suppressed = (
        g.app.unitTesting or         # Don't add buttons during testing.
        self.c.undoer.undoing or     # Suppress buttons during undo.
        self.active_flag or          # Buttons already created.
        p.v in self.inhibit)         # Buttons are inhibited for this vnode.
    if suppressed:
        return False
    return self.is_big_text(p) and self.is_qt_body()
#@+node:ekr.20170123092100.14: *5* btc.should_go_away
def should_go_away(self, p):
    '''Return True if big-text buttons should be removed for p.'''
    if self.c.undoer.undoing:
        # Suppress buttons during undo.
        return False
    return self.active_flag and not self.is_big_text(p)
#@+node:ekr.20170123092100.15: *5* btc.wait_message
def wait_message(self):
    '''Issue a message asking the user to wait until all text loads.'''
    g.es(
        "Loading large text, please wait\n"
        "until scrollbar stops shrinking", color='red')
#@+node:ekr.20170123092100.16: *5* btc.warning_message
def warning_message(self):
    '''Return the warning message, formatted with the body size and limit
    and dedented to the commander's tab width.'''
    c = self.c
    s = '''\
Loading big text: %s characters. Limit is %s.

Beware of a Qt bug: You will **lose data** if you change the text
before it is fully loaded (before the scrollbar stops moving).

To disable these buttons set @int max-pre-loaded-body-chars = 0
'''
    s = s.rstrip() % (len(self.s), c.max_pre_loaded_body_chars)
    return g.adjustTripleString(s, c.tab_width)
#@+node:ekr.20170215100221.1: ** Unused docs
@language rest
@wrap
#@+node:ekr.20170217031210.1: *3* COPY @file html/index.html
@first <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
@first "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
@first <!-- This is Leo's home page -->

@language html
@tabwidth -2

<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    << head >>
</head>
<body>
  << div top nav >>
  <div class="document">
    <div class="documentwrapper">
      <div class="bodywrapper">
        <div class="body">
          << div main section >>
        </div>
      </div>
    </div>
    << sidebar >>
    <div class="clearer"></div>
  </div>
  << div bottom nav >>
  << div footer >>
</body>
</html>
#@+node:ekr.20170217031210.2: *4* << head >> (home page)
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />

<title>Leo's Home Page</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />

<!-- scale screen-shot to fit parent container -->
<style type="text/css">
img { max-width:100%; max-height:100%; }
</style>

<!--
<script type="text/javascript">
  var DOCUMENTATION_OPTIONS = {
    URL_ROOT:    '',
    VERSION:     '5.4',
    COLLAPSE_MODINDEX: false,
    FILE_SUFFIX: '.html',
    HAS_SOURCE:  false
  };
</script>
-->
<< flattr head >>

<< feed2js stylesheet >>

<!-- <script type="text/javascript" src="_static/jquery.js"></script>-->
<!-- <script type="text/javascript" src="_static/doctools.js"></script>-->
<link rel="top" title="Leo 5.4 documentation" href="index.html" />
<link rel="Users Guide" title="Leo&#8217;s Users Guide" href="leo_toc.html" />
<!-- <link rel="prev" title="Using Leo's Commands" href="commands.html" />--> 

<script type="text/javascript">
function search(event) {
  if (event === null || (event.keyCode || event.which) === 13) {
    window.open('http://www.google.com/search?q=site:leoeditor.com+'+document.getElementById('q').value);
  }
}
</script>
#@+node:ekr.20170217031210.3: *5* << flattr head >>
<script type="text/javascript">
/* <![CDATA[ */
    (function() {
        var s = document.createElement('script'), t = document.getElementsByTagName('script')[0];
        s.type = 'text/javascript';
        s.async = true;
        s.src = 'http://api.flattr.com/js/0.6/load.js?mode=auto';
        t.parentNode.insertBefore(s, t);
    })();
/* ]]> */
</script>
#@+node:ekr.20170217031210.4: *5* << feed2js stylesheet >>
<style type="text/css" media="all">

<!-- See: http://feed2js.org/index.php?s=style -->

.rss_box {
    width:   0px;
    padding: 0px;
    border:  0px solid white;
    margin:  0px;
}

.rss_title {
    visibility:hidden;
    line-height: 0px;
}

.rss_items {
    font-size: 80%;
}

</style>
#@+node:ekr.20170217031210.5: *4* << div top nav >>
<div class="related">
  <h3>Navigation</h3>
  <ul>
    <li class="right" style="margin-right: 10px">
      <a href="genindex.html" title="General Index"
         accesskey="I">index</a></li>
    <li class="right" >
      <a href="glossary.html" title="Glossary"
         accesskey="G">glossary</a> |</li>
    <li class="right" >
      <a href="leo_toc.html" title="Users Guide"
         accesskey="N">contents</a> |</li>
    <li><a href="leo_toc.html">Leo 5.4 documentation</a></li>
    <!--  &raquo; -->
  </ul>
</div>
#@+node:ekr.20170217031210.6: *4* << div main section >> (home page)
<div class="section" id="Leo&#8217;s Home page">

    <h1>Leo&#8217;s Home Page</h1>
    << Leo is... >>
    << Leo Google group & postcard >>
    << screenshot >>
    << embed leo-editor >>
</div>
#@+node:ekr.20170217031210.7: *5* << Leo is... >>
<p><blockquote>
  "Leo is a revolutionary step in the right direction for programming."&#8213;Brian Takita
</blockquote>

<blockquote> "Leo is the best IDE that I have had the pleasure to use. It
has totally changed not only the way that I program, but also the way that
I store and organize all of the information that I need for the job that I
do."&#8213;Ian Mulvany
</blockquote>

<blockquote>
"When first I opened Leo, it was out of curiosity. But having used it, I'll
never go back. They'll have to pry Leo out of my cold, dead fingers!"&#8213;Travers A. Hough
</blockquote>
<p>
Leo is a
<a href="https://en.wikipedia.org/wiki/Personal_information_manager">PIM</a>, 
<a href="https://en.wikipedia.org/wiki/Integrated_development_environment">IDE</a>
and outliner that
<strong>accelerates the work flow</strong>
of programmers, authors and web designers.
Leo's unique features <strong>organize data in a revolutionary way</strong>:
<ul>
<li>
  Leo outlines are views on an underlying
  <a href="https://en.wikipedia.org/wiki/Directed_acyclic_graph">graph</a>.</li>
<li>
  Outline nodes can reside in <strong>many</strong> places within a single outline.</li>
<li>
  Leo is fully scriptable in <a HREF="http://www.python.org/">Python</a>.</li>
<li>
  Leo scripts have full access to Leo's source code and all outline data.</li>
<li>
  <a href="tutorial-programming.html">Outline-oriented markup</a> generates
  <a href="tutorial-basics.html#creating-external-files-with-file-and-all">external files</a>
  from outlines.</li>
</ul>
</p>
<p>
  <a href="preliminaries.html#preface">
    <strong>Learn why Leo is special</strong></a>.
  <br>
  <a href="tutorial.html">
    <strong>Learn about Leo in 10 minutes</strong></a>.
</p>
<p>
Furthermore:
</p>
<ul>
<li>
Leo is <a href="http://www.opensource.org/"><i>Open Software</i></a>,
written in pure <a HREF="http://www.python.org/">Python</a>.</li>
<li>
Leo is a <a HREF="http://www.riverbankcomputing.co.uk/software/pyqt/intro">PyQt</a>
app that runs on Linux, Windows and MacOS.</li>
<li>
Leo works well with <a href="http://www.gnu.org/software/emacs/">emacs</a>,
  <a href="http://www.xemacs.org/">xemacs</a>,
  <a href="http://www.vim.org/">vim</a>, and
  <a href="http://ipython.org/">ipython</a>.</li>
</ul>
#@+node:ekr.20170217031210.8: *5* << Leo Google group & postcard >>
<p>
<strong>We welcome your questions and comments!</strong>
<br>
Ask us anything at the medium traffic
<a href="http://groups.google.com/group/leo-editor"><strong>leo-editor Google Group</strong></a>.
<br>
If you use Leo, please
<a href="mailto:edreamleo@gmail.com?Subject=About%20Leo" target="_top">
  <strong>send us a postcard</strong></a>, telling how you use Leo. Thanks!
</p>

<!--
<p>Here are the most recent discussions...</p>
<script language="JavaScript"
  src="http://itde.vccs.edu/rss2js/feed2js.php?src=http%3A%2F%2Fgroups.google.com%2Fgroup%2Fleo-editor%2Ffeed%2Frss_v2_0_msgs.xml&chan=title&num=5&desc=0&date=y&targ=y"
  type="text/javascript">
</script>
-->
#@+node:ekr.20170217031210.9: *5* << screenshot >>
<p>Here is a screenshot of Leo's main window:</p>

<!-- actual size: 1152 x 648 -->
<img class="screenshot"
    src="screen-shots/leo-workbook.png"
    alt="ScreenShot"/>

<!-- <img class="screenshot"
    src="screen-shots/render-svg-sources.png"
    width="791.2px" height="692.8px"
    alt="ScreenShot"/>
-->
#@+node:ekr.20170217031210.10: *5* << embed leo-editor >>
<!-- This works --
<iframe id="forum_embed"
    src="javascript:void(0)"
    scrolling="yes"
    frameborder="1"
    width="900"
    height="700">
</iframe>
<script type="text/javascript">
  document.getElementById('forum_embed').src =
     'https://groups.google.com/forum/embed/?place=forum/leo-editor'
     + '&showsearch=true&showpopout=true&showtabs=true'
     + '&parenturl=' + encodeURIComponent(window.location.href);
</script>
-->
#@+node:ekr.20170217031210.11: *4* << sidebar >> (home page)
<div class="sphinxsidebar">
    <div class="sphinxsidebarwrapper">
        << sidebar contents >>
    </div>
</div>
#@+node:ekr.20170217031210.12: *5* << sidebar contents >> (home page)
@language html

<p class="logo"><a href="leo_toc.html">
  <img class="logo" src="_static/Leo4-80-border.jpg" alt="Logo"/></a>
</p>

<p>
  <a class="reference external" href="leo_toc.html">Table of contents</a>: click the lion!
</p>
<p>
<form>
  <!-- background-color:skyblue3 -->
  <!-- border-style:solid;border-color:#87CEEB -->
  <input type="button"
    style="background-color:#6CA6CD;"
    value="Download Leo"
    onClick="window.location.href='download.html'">
</form>
</p>

<a class="reference external" href="screencasts.html">
  Videos about Leo</a><br>

<!---
<a href="http://www.youtube.com/watch?v=Zu6J-J0qFi0">
  Introductory video</a><br>
<a href="https://vimeo.com/77720098 ">
  Bookmarks plugin video</a><br>
-->
  
<a class="reference external" href="tutorial.html">
  Leo's Tutorial</a><br>

<a class="reference external" href="leo_toc.html">
  Leo's Documentation</a><br>
  
<a class="reference external" href="http:screen-shots.html">
  Screen shots of Leo</a><br>
  
<a class="reference external" href="testimonials.html">
  Quotes from Leo&#8217;s Users</a><br>
  
<a class="reference external" href="slides.html">
  Tutorial Slide Shows</a><br>
  
<a class="reference external" href="http://webchat.freenode.net/?channels=%23leo&uio=d4">
  #leo IRC</a><br>

<a class="reference external" href="http://groups.google.com/group/leo-editor">
  leo-editor: Google Groups</a><br>
  
<a class="reference external" href="https://github.com/leo-editor">
  leo-editor: GitHub</a><br>
  
<a class="reference external" href="https://github.com/leo-editor/snippets">
  leo-editor: Code Snippets</a><br>

<a class="reference external" href="https://github.com/leo-editor/leo-editor/issues">
  Report a bug</a><br>
  
<a class="reference external" href="http://leo.zwiki.org">
  Leo&#8217;s Wiki</a><br>
  
<a class="reference external" href="http://leo-editor.readthedocs.org/en/latest/">
  Leo&#8217;s API documentation</a><br>
  
<a class="reference external" href="http://www.mind-mapping.org/">
  Mind Mapping</a><br>

<a class="reference external" href="leoLinks.html">
  More links...</a><br>
  
</p>
<p>
<div id="searchbox" style="">
  <h3>Search</h3>
    <input id='q' type="text" name="q" onkeypress="search(event)">
  <div><button onclick="search(null)">Google search</button></div>
  <div class="searchtip" style="font-size: 90%">
    Use quotes for "@file" and similar symbols.
  </div>
</div>
</p>

<p>Leo: Leonine Editor with Outlines</p>

<p>Written by <a HREF="ekr.html">Edward K. Ream</a>
<< flattr icon >>
</p>
#@+node:ekr.20170217031210.13: *6* << flattr icon >>
<a class="FlattrButton" style="display:none;" rev="flattr;button:compact;"
href="http://leoeditor.com"></a>
<noscript>
<a href="http://flattr.com/thing/410073/Leo" target="_blank">
<img src="http://api.flattr.com/button/flattr-badge-large.png"
alt="Flattr this" title="Flattr this" border="0" />
</a>
</noscript>
#@+node:ekr.20170217031210.14: *4* << div bottom nav >>
<div class="related">
  <h3>Navigation</h3>
  <ul>
    <li class="right" style="margin-right: 10px">
      <a href="genindex.html" title="General Index"
         >index</a></li>
    <li class="right" >
      <a href="glossary.html" title="Glossary"
         accesskey="G">glossary</a> |</li>
    <li class="right" >
      <a href="leo_toc.html" title="Users Guide"
         >contents</a> |</li>

    <li><a href="leo_toc.html">Leo 5.3 documentation</a></li> 
  </ul>
</div>
#@+node:ekr.20170217031210.15: *4* << div footer >>
<div class="footer">
  &copy; Copyright 1997-2016, Edward K. Ream.
  Last updated on May 1, 2016.
  Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.1.
</div>
#@+node:ekr.20170216014459.1: *3* @rst html\tutorial-programming.html
@wrap

.. |---| unicode:: U+02015 .. for quotes
   :trim:
   
####################
Programming with Leo
####################

    "We who use Leo know that it is a breakthrough tool and a whole new way
    of writing code."---Joe Orr

Now we come to the programming features that distinguish Leo from other programming environments. Several kinds of nodes create external files. This tutorial discusses four of them: @clean, @file, @edit and @auto. @clean is recommended for most purposes.

Please study this section carefully if you intend to use Leo for programming. If you get stuck, please `ask for help <https://groups.google.com/forum/#!forum/leo-editor>`_ immediately.

.. contents::
    :depth: 3
#@+node:ekr.20170216014459.2: *4* Using @clean
.. tutorial-programming.html

.. index::
    pair: @clean; Tutorial

Leo creates **external files** (files on your file system) from **@clean nodes** and their *descendant* nodes. Examples::

    @clean spam.py
    @clean ../foo.c
    @clean ~/bar.py

A single Leo outline may contain many @clean nodes. Leo outlines are like project files in other IDE's (Integrated Development Environments).

.. index::
    pair: Import Script; Tutorial
    pair: import-file command; Tutorial

Leo's import-file command creates a single @clean tree using the same importers that @auto uses. When importing several files it is more convenient to use a **import script**. Leo's `Scripting Tutorial <tutorial-scripting.html>`_ discusses such scripts.
#@+node:ekr.20170216014459.3: *5* Example
.. index::
    pair: Markup; Tutorial
    
Simple text markup tells Leo how to create an external file from an
@clean node and its descendants. Let's look at a step-by-step example:

1. Create a new outline node whose headline is::

    @clean myfile.py

2. Type this in the body of the @clean node::
    
    @language python
    @tabwidth -4
    << docstring >>
    @others
    if __name__ == '__main__':
        main()

3. Create a child of the @clean node whose headline is::
   
   << docstring >>

4. Type this in the child's body text::

        '''This is the docstring for the myfile module.'''
    
5. Create another child of the @clean node whose headline is::

        main
    
6. Type this in the body of the node::

        def main():
            print('hello from main')
        
7. Save the outline as myfile.leo. This creates myfile.py automatically.
   
8. Quit Leo.

9. Open myfile.py in your favorite text editor. You will see::

        '''This is the docstring for the myfile module.'''
        def main():
            print('hello from main')
        if __name__ == '__main__':
            main()

Usually you will edit myfile.py from Leo, but Leo can update the outline
from changes you make in the text editor!

10. In your text editor, change the line::

        print('hello from main')
        
to::

        print('hello AGAIN from main')
        
11. Save myfile.py and quit your text editor.

12. Open myfile.leo. Leo reports that the 'main' node has been changed. Its body text is now::
    
        def main():
            print('hello AGAIN from main')
        
Automatic updates of @clean nodes is an important feature of Leo. We'll see
how this works later. But first, the next section discusses Leo's markup in
detail.
#@+node:ekr.20170216014459.4: *5* Markup
.. index::
    pair: Section Name; Tutorial
    pair: Section Definition Node; Tutorial
    pair: Section Reference; Tutorial
    pair: Expansion; Tutorial
    pair: Markup; Tutorial
    pair: @others; Tutorial
    
This section discusses Leo's markup. This markup tells how to create
external files from @clean trees.

**Essential Terms**:

- A **section name** is any text of the form: << any text >>.
  (>> must not appear in "any text".)

- A **section definition node** is any node whose headline starts with a
  section name.

- A **section reference** is a section name that appears in body text.

| Leo creates external files containing @others directives by writing the *expansion* of the @clean node.
| The **expansion** of *any* node is the node's body text after making these text **substitutions**:

1. Leo replaces @others with the *expansion* of all descendant nodes
   **except** section definition nodes. That's how @others got its name.

2. Leo replaces section references by the *expansion* of the body text of
   the corresponding section definition node.
   
Whitespace is significant before @others and section references. Leo adds
the leading whitespace appearing before each @others directive or section
reference to every line of their expansion. As a result, Leo can generate
external files even for Python.  For example, here is a cute trick::

    if 1:
        << a section >>
    if 0:
        @others

**Notes**:

- Neither whitespace nor capitalization is important in section names.
  The following section names are equivalent::
  
      << a section >>
      <<ASection>>

- Any node may contain a *single* @others directive. No node may contain more
  than one @others directive.

- Nodes that *aren't* section definition nodes are included in the expansion
  of the *nearest* ancestor node containing an @others directive.
  
.. index::
    pair: Orphan Node; Tutorial

- An **orphan node** is a descendant of an @clean node not included in any
  substitution. Leo refuses to write external files containing orphan
  nodes. Instead, Leo writes the @clean tree to the .leo file itself,
  thereby preserving all data.

**Example 1**: The body of the @clean node for a typical Python module will
look something like::

    '''A docstring.'''
    << imports >>
    @others
    if __name__ == '__main__':
        main()
        
**Example 2**:  Here is a typical Python class definition in Leo::

    class MyClass:
        '''A docstring.'''
        @others
#@+node:ekr.20170216014459.5: *5* Updating @clean trees
Leo 5.1 introduced the ability to update @clean trees from changes made to the corresponding external file. This is one of the most important developments in Leo's history. Previously, Leo could perform such updates only if the external file contained **sentinel lines**, comment lines corresponding to Leo markup that indicated outline structure.

.. index::
    pair: Mulder/Ream update algorithm; Tutorial
    pair: Update algorithm; Tutorial
    
The update algorithm is a clever three-way diff/merge of lines computed
from the outline and the external files. You don't have to know how it
works in order to use Leo. It is `explained here <http://leoeditor.com/appendices.html#the-mulder-ream-update-algorithm>`_.

In most cases, the update algorithm "just works". However, there is one
edge case that you should be aware of.

**Ambiguous lines** are lines that could be placed either at the end of a
node or at the beginning of the following node. The update algorithm must
**guess** where to place such lines--there simply is not enough data to
know for sure. The algorithm always places ambiguous lines at the end of
the previous node because it's more common to add lines to the end of
nodes.

The update algorithm is **sound** because guesses do not affect the
resulting external file. That is, the contents of the external file will be
the same whether an ambiguous line is placed at the end of one node or the
beginning of the next.

The exact placement of ambiguous lines may not matter as far as the
contents of the external files is concerned, but it will certainly matter
to you! You will want lines placed in their proper nodes.

Happily, Leo reports which nodes have been updated. You can override
guesses manually by moving lines to a new node. Once you save the outline,
the lines will no longer become ambiguous. The next time the update
algorithm runs, it will no longer have to guess where the lines belong!
#@+node:ekr.20170216014459.6: *4* Using @file
.. index::
    pair: @file; Tutorial
    pair: sentinel lines; Tutorial

Before Leo 5.1, the recommended way to create external files was with **@file trees**. @file trees use the same markup as @clean trees.

The only difference between @clean and @file is that @file inserts **sentinel lines** into external files. Sentinel lines are comments corresponding to markup. Sentinel lines *explicitly* represent structure.

Sentinel comments greatly simplify the update algorithm. There are no ambiguous lines in @file trees! @file nodes are especially useful when everyone in a project uses Leo. Leo's sources use @file trees for the convenience of Leo developers.

@file is recommended if you seldom share files with others. Nevertheless,
most people will prefer to use @clean instead of @file. For example,
changing::

    @clean myfile.py
    
to::

    @file myfile.py
    
in the earlier example will create the file myfile.py whose contents looks something like this::

#@verbatim
    #@+leo-ver=5-thin
#@verbatim
    #@+node:ekr.20131023081456.2537: * @file myfile.py
#@verbatim
    #@@language python
#@verbatim
    #@@tabwidth -4
#@verbatim
    #@+<< docstring >>
#@verbatim
    #@+node:ekr.20131023081456.2538: ** << docstring >>
    '''This is the docstring for this python module.'''
#@verbatim
    #@-<< docstring >>
#@verbatim
    #@+others
#@verbatim
    #@+node:ekr.20131023081456.2539: ** main
    def main():
        print('hello from main')
#@verbatim
    #@-others
    if __name__ == '__main__':
        main()
#@verbatim
    #@-leo
    
Not pleasant to look at. However, sentinel lines won't bother you if you
seldom change the external file outside Leo. For example, Leo's developers
use @file nodes for Leo's own sources.
#@+node:ekr.20170216014459.7: *4* Summary: @clean vs @file
This tutorial discussed @clean first for three reasons:

1. To avoid making excuses for sentinel lines.

2. To brag about the @clean update algorithm.

3. It is often more convenient to avoid sentinels. For example, when I study other people's code I do the following:

- Create a `git <http://git-scm.com/>`_ repo of the directory to be studied, adding all the source files and doing an initial commit.
  
- Use an `import script <tutorial-scripting.html#import-scripts>`_ to create the desired @clean nodes.

- Explicitly save all the imported files using Ctrl-Shift-W (write-at-file-nodes).

- Use git diff to ensure that no important changes have been made while importing the nodes.

- Use git diff to track any changes I make (such as adding tracing or debugging statements) for the purposes of studying the code.

Using @clean is an essential part of this work flow. The diffs would be much harder to read if I had imported files into @file trees instead.

Nevertheless, there are advantages to using @file instead of @clean. All these advantages arise because external files created by @file contain explicit outline structure.

1. Leo can read @file trees more quickly and without guesses. There is never any need for you to move ambiguous lines from one node to another.
   
2. Sentinels allow you to share structure simply by sending the external file. This is an overwhelming advantage for Leo's developers.
   
3. A minor advantage: @file makes Leo files smaller. Leo stores the entire @clean *tree* in the .leo file, but only stores the top-level @file *node* in the .leo file.
   
In short: use @clean for most purposes. Use @file when all developers in a project use Leo.
#@+node:ekr.20170216014459.8: *4* Using @edit
.. index::
    pair: @edit; Tutorial

Use @edit instead of @clean to place the entire contents of an external file into a single outline node. Leo writes no sentinel when writing @edit files.
#@+node:ekr.20170216014459.9: *4* Using @auto
Use @auto instead of @clean when you expect other people to make substantial changes to an external file without using Leo.

.. index::
    pair: @auto; Tutorial
    pair: Importer; Tutorial

When reading @auto files, Leo **importers** create an outline from the external file. Importers create nodes for each class, method and function in the external file.

Notes:

- Leo determines the language using the file's extension.

- Importers exist for C, C#, elisp, html, .ini files, Java, Javascript, Pascal, PHP, Python, TypeScript, vimoutliner files and xml.

- If no importer exists for a file, Leo reads the entire file into an @edit node.
#@+node:ekr.20170216014459.10: *5* Using @persistence
.. index::
    pair: gnx; Tutorial
    pair: uA; Tutorial
    pair: @persistence; Tutorial

With @clean and @file, Leo can store **persistent data** in nodes. This information consists of the node's **gnx** (Global Node Index) and the node's **uA**, (User Attributes). The gnx gives each node a unique, immutable identity. Gnx's make clones possible. The uA allows scripts and plugins to associate arbitrarily much additional data with each node.

.. To do: links to further discussion of gnx and uA.

By default, Leo's importers preserve neither gnx's nor uA's. This makes imported @auto trees second class citizens. To remedy this, if an outline contains an @persistence node, Leo will save data in the @persistence tree that allows Leo to recover gnx's and uA's when re-reading @auto files later. This allows clone links and uA's to persist.

@persistence is an optional feature. The stored data is akin to bookmarks. The data can "break" (become inaccessible) if the structure (including class/method/function names) changes. However, the data will typically break infrequently. To disable this feature, just delete an existing @persistence node or change @persistence to @@persistence.
#@+node:ekr.20170216014459.11: *4* Directives
You now know enough to create external files and to choose between @clean and @file. The following sections discuss directives affecting external files.
#@+node:ekr.20170216014459.12: *5* Using @path
.. index::
    pair: \@path; Tutorial

Rather than specifying long paths in @file nodes, you can specify a path in
an ancestor @path node.

For example, suppose three nodes have the following headlines::

    @path a
        @path b
            @file c/d.py

The @file node creates the file a/b/c/d.py

Within @path and @<file> paths, ``{{exp}}`` gets evaluated with c, g, p, os and sys predefined.  For example::

    @file {{os.path.abspath(os.curdir)}}/abc.py
#@+node:ekr.20170216014459.13: *5* Using @first and @last
.. index::
    pair: @first; Tutorial
    pair: @last; Tutorial

The @first directive forces lines to appear before the first sentinel of an
external file. For example::

    @first #! /usr/bin/env python
    @first # -*- coding: utf-8 -*-

Similarly, @last forces lines to appear after the last sentinel.

The @first and @last directives aren't necessary within @clean trees, but
they are allowed within @clean trees.
#@+node:ekr.20170216014459.14: *4* Summary
- @clean, @file, @edit and @auto trees create external files. @clean is recommended for most purposes.

- When writing @clean, @file and @auto trees, Leo replaces @others and section references with their expansions.
  
- A **section name** is any text of the form: ``<< any text >>``.

- A **section definition node** is a node whose headline starts with a section name.

- A **section reference** is a section name in body text.

- @edit reads an entire external file into the body text of the @edit node.

- @auto imports an external file into an outline, creating nodes for functions, methods and classes. @persistence nodes optionally save gnx's and uA's of @auto trees in a semi-permanent way.
  
- @first places lines before the first sentinel lines of a file.

- @path specifies a common prefix for file names of @clean, @file, @edit and @auto nodes.

- leo/core/LeoPyRef.leo contains all of Leo's core source code.
#@+node:ekr.20150527092615.1: ** Unused code
@language python
@ignore
#@+node:ekr.20150601161120.1: *3* Beautifier...
#@+node:ekr.20150521132404.1: *4* class AddTokensToTree (AstFullTraverser)
class AddTokensToTree(leoAst.AstFullTraverser):
    '''
    A traverser that injects token-level data into an ast tree: string
    spellings (do_Str) and trailing comment/blank-line tokens
    (inject_trailing_tokens).
    
    This class could be folded into the LeoTidy class, but doing so would
    save negligible time.
    '''
        
    @others
#@+node:ekr.20150525074945.1: *5* add.ctor
def __init__(self, c, settings, tokens):
    '''Ctor for the AddTokensToTree class.'''
    leoAst.AstFullTraverser.__init__(self)  # Init the base class.
    # Copy the arguments.
    self.c = c
    self.settings = settings
    self.tokens = tokens
    # Statistics.
    self.n_visits = 0  # Number of calls to visit.
    self.n_set_tokens = 0  # Number of calls to set_tokens.
    # Traversal state.
    self.prev_statement = None  # The previous statement node.
    self.statements_d = {}  # Set by make_statements_d. Keys are statement kinds.
    self.strings_list = []  # All string-token entries: (line, name, lws, text).
    self.tokens_d = {}  # Debugging only: line number -> token entries.
    self.trailing_tokens_list = []  # Pending (line, name, lws, text) entries.
    self.trailing_tokens_list_offset = 0  # Number of trailing tokens consumed.
    # Compute the derived tables.
    self.make_statements_d()
    self.make_tokens_data(tokens)
        # Makes strings_list, trailing_tokens_list and (debug only) tokens_d.
#@+node:ekr.20150525105228.1: *5* add.dump_tokens_d
def dump_tokens_d(self):
    '''Print tokens_d'''
    print('-' * 20)
    print('AddTokensToTree: tokens_d...')
    # Hoist the loop-invariant padding strings.
    pad = 6 * ' '
    joiner = '\n' + 7 * ' '
    for line_no in sorted(self.tokens_d.keys()):
        entries = self.tokens_d.get(line_no)
        if not entries:
            continue
        closer = '' if len(entries) == 1 else '\n' + pad
        parts = []
        for line_no2, name, lws, s in entries:
            assert line_no == line_no2, (line_no, line_no2)
            lws = self.show_lws(lws)
            if name in ('newline', 'nl'):
                s = repr(str(s))
            parts.append('%10s lws: %-2s %s' % (name, lws, s))
        print('%4s: [%s%s]' % (line_no, joiner.join(parts), closer))
#@+node:ekr.20150526094135.1: *5* add.dump_strings_list
def dump_strings_list(self):
    '''Dump the strings list.'''
    print('-' * 20)
    print('AddTokensToTree: strings_list...')
    for line_no, name, lws, s in self.strings_list:
        lws = self.show_lws(lws)
        assert name == 'string', name
        print('%3s lws: %-2s %s' % (line_no, lws, s))
#@+node:ekr.20150526094136.1: *5* add.dump_trailing_list
def dump_trailing_list(self):
    '''Dump the trailing tokens list.'''
    print('-' * 20)
    print('AddTokensToTree: trailing_tokens_list...')
    for line_no, name, lws, s in self.trailing_tokens_list:
        # Blank-line tokens print more readably as their repr.
        shown = repr(str(s)) if name in ('newline', 'nl') else s
        print('%4s: %8s lws: %-2s %s' % (line_no, name, self.show_lws(lws), shown))
#@+node:ekr.20150522110017.1: *5* add.make_statements_d
def make_statements_d(self):
    '''
    Create self.statements_d, whose keys are the kinds of statement that
    should call set_tokens.  Values are always 0 and are ignored.

    In essence, these entries replace visitors.

    Note: the old docstring claimed this method *returns* the dict; it
    actually assigns self.statements_d.
    '''
    kinds = [
        'Assert', 'Assign', 'AugAssign',
        'Break', 'Call', 'ClassDef', 'Continue', 'Delete',
        'ExceptHandler', 'Exec', 'Expr',
        'For', 'FunctionDef', 'Global',
        'If', 'Import', 'ImportFrom', 'Lambda',  # Module
        'Pass', 'Print', 'Raise', 'Return',
        'Try', 'TryExcept', 'TryFinally',
        'While', 'With', 'Yield',
    ]
    # dict.fromkeys replaces the old manual loop.
    self.statements_d = dict.fromkeys(kinds, 0)
#@+node:ekr.20150525083523.1: *5* add.make_tokens_data
def make_tokens_data(self,tokens):
    '''
    Make tokens_d, strings_list and trailing_tokens_list.

    tokens is an iterable of 5-tuples in the shape produced by Python's
    tokenize module: (type, text, (srow, scol), (erow, ecol), raw_line).
    '''
    self.strings_list = []
    self.tokens_d = {}
    self.trailing_tokens_list = []
    n = 1 # The line number (one-based)
    aList = [] # The list of tokens with line number n (one-based)
    lws = '' # The leading whitespace of the current line.
    for t1,t2,t3,t4,t5 in tokens:
        name = token.tok_name[t1].lower()
        s = g.toUnicode(t2)
        srow,scol = t3
        erow,ecol = t4
        raw_s = g.toUnicode(t5) # Contains leading whitespace!
        if n != srow:
            # Starting a new source line: record the finished line's
            # tokens and recompute lws from the new raw line.
            # g.pr("----- line",srow,erow,repr(line))
            self.tokens_d[n] = aList
            aList,n = [],srow
            for i,ch in enumerate(raw_s):
                if ch not in ' \t':
                    lws = raw_s[:i]
                    break
            else:
                # The raw line is empty or all blanks/tabs.
                lws = ''
        data = n,name,lws,s  
        if name == 'nl' and not aList:
            # An 'nl' token starting a line is a blank line: a trailing token.
            self.trailing_tokens_list.append(data)
        elif name == 'comment':
            self.trailing_tokens_list.append(data)
        elif name == 'string':
            # NOTE(review): unconditional g.trace looks like leftover
            # debugging output -- confirm before reusing this code.
            g.trace(n,'raw_s',raw_s.rstrip())
            self.strings_list.append(data)
        else:
            pass
        # Put all tokens in self.tokens_d
        aList.append(data)
        # g.trace('%10s %r %r' % (name,lws,t2))
    # Finish the last line.
    self.tokens_d[n] = aList
    # Optionally dump the computed tables, per user settings.
    for setting, func in (
        ('ast_tokens_d',        self.dump_tokens_d),
        ('ast_strings_list',    self.dump_strings_list),
        ('ast_trailing_list',   self.dump_trailing_list),
    ):
        if self.settings.get(setting):
            func()
#@+node:ekr.20150521174358.1: *5* add.run
def run(self, node):
    '''
    The main line for the AddTokensToTree class.
    Traverse the tree rooted at node and return the number of visits.
    '''
    # The root serves as the initial "previous statement".
    self.prev_statement = node
    self.visit(node)
    return self.n_visits
#@+node:ekr.20150521174136.1: *5* add.set_tokens & helper
def set_tokens(self, node):
    '''
    Attach pending trailing tokens to the previous statement, then make
    node the new previous statement.

    node.lineno: the line number of source text: the first line is line 1.
    node.col_offset: the UTF-8 byte offset of the first token that generated the node.
    '''
    self.inject_trailing_tokens()
    self.n_set_tokens += 1
    self.prev_statement = node
#@+node:ekr.20150525101059.1: *6* add.inject_trailing_tokens
def inject_trailing_tokens(self):
    '''Inject all previous comment tokens into self.prev_statement.'''
    trace = False
    prev = self.prev_statement
    prev_n = getattr(prev, 'lineno', 1)
    offset = self.trailing_tokens_list_offset
    # Count the pending entries whose line precedes the previous statement.
    count = 0
    for entry in self.trailing_tokens_list[offset:]:
        if entry[0] >= prev_n:
            break
        count += 1
    if count:
        # Attach those entries to the previous statement and consume them.
        prev.trailing_tokens = self.trailing_tokens_list[offset : offset + count]
        self.trailing_tokens_list_offset += count
        if trace:
            print('inject trailing tokens %3s %12s %s' % (
                count, self.kind(prev), prev.trailing_tokens))
#@+node:ekr.20150526093911.1: *5* add.show_lws
def show_lws(self,s):
    '''Show leading whitespace in a convenient format.

    Returns repr(s) when s contains anything besides spaces,
    otherwise just the number of spaces.
    '''
    if s.strip(' '):
        return repr(s)
    return len(s)
#@+node:ekr.20150521174401.1: *5* add.visit
def visit(self,node):
    '''AddTokensToTree.visit.

    Dispatch to the do_<ClassName> visitor for node, first calling
    set_tokens() for nodes whose class name appears in self.statements_d.
    Raises AttributeError if no visitor exists for the node's class.
    '''
    self.n_visits += 1
    name = node.__class__.__name__
    if name in self.statements_d:
        self.set_tokens(node)
    method = getattr(self,'do_' + name)
    method(node)
#@+node:ekr.20150525171444.1: *5* add.Str
def do_Str (self,node):
    '''Associate the next queued string token with an ast.Str node.

    Pops the head of self.strings_list (a (lineno, kind, lws, spelling)
    tuple) and records the original source spelling on the node.
    '''
    head = self.strings_list.pop(0)
    lineno, kind, lws, spelling = head
    assert kind == 'string'
    node.str_spelling = spelling
#@+node:ekr.20150520173107.1: *4* class LeoTidy (Uses PythonTidy)
class LeoTidy:
    '''A class to beautify source code from an AST'''
    
    def __init__(self,c,options_d=None):
        '''Ctor for the LeoTidy class.

        c: the Leo commander.
        options_d: accepted but not used in this ctor.
        '''
        self.c = c
        self.code_list = []      # The list of output tokens to be emitted.
        self.in_arg_list = False # True: presently formatting an argument list.
        self.indent = ' ' * 4    # One unit of indentation.
        self.level = 0           # The current indentation level.
        self.tab_width = 4
    
    @others
#@+node:ekr.20150527171440.1: *5* class OutputToken
class OutputToken:
    '''A class representing items on the code list.

    kind:  the token kind ('word', 'lit', 'line-end', etc.).
    lws:   the indentation level at which the token was generated.
    value: the token's text, or a non-string request (e.g. a blank-line count).
    '''

    def __init__(self,kind,lws,value):
        self.kind = kind
        self.lws = lws
        self.value = value

    def __repr__(self):
        # Bug fix: the old code called show_lws(), which is not defined in
        # this scope (it is a method of an unrelated class), so every repr()
        # raised NameError. Show self.lws directly instead.
        return '%15s %-2s %s' % (self.kind,self.lws,repr(self.value))

    __str__ = __repr__

    def to_string(self):
        '''Convert an output token to a string; non-string values yield ''.'''
        return self.value if g.isString(self.value) else ''
#@+node:ekr.20150523083023.1: *5* lt.Code Generators
#@+node:ekr.20150523131619.1: *6* lt.add_token
def add_token(self,kind,value=''):
    '''Append a new OutputToken of the given kind, at the current level.'''
    self.code_list.append(self.OutputToken(kind, self.level, value))
#@+node:ekr.20150526052853.1: *6* lt.arg_start & arg_end
def arg_end(self):
    '''Mark the end of an argument list on the code list.'''
    self.add_token('arg-end', '')

def arg_start(self):
    '''Mark the start of an argument list on the code list.'''
    self.add_token('arg-start', '')
#@+node:ekr.20150523083639.1: *6* lt.blank
def blank(self):
    '''Add a blank request on the code list.'''
    # Kinds after which a blank must NOT be added: duplicates,
    # line boundaries, and explicit no-blank requests.
    suppress = (
        'blank', 'blank-lines',
        'file-start', 'line-start', 'line-end',
        'arg-start', 'lit-no-blanks', 'lt',
    )
    if self.code_list[-1].kind not in suppress:
        self.add_token('blank', ' ')
#@+node:ekr.20150523084306.1: *6* lt.blank_lines
def blank_lines(self,n):
    '''
    Add a request for n blank lines to the code list.
    Multiple blank-lines request yield at least the maximum of all requests.
    '''
    # Count the number of 'consecutive' end-line tokens, ignoring blank-lines tokens.
    prev_lines = 0
    i = len(self.code_list)-1 # start-file token guarantees i >= 0
    while True:
        kind = self.code_list[i].kind
        if kind == 'file-start':
            # At the top of file: pretend the request is already satisfied.
            prev_lines = n ; break
        elif kind == 'blank-lines':
            i -= 1
        elif kind == 'line-end':
            i -= 1 ; prev_lines += 1
        else: break
    # g.trace('i: %3s n: %s prev: %s' % (len(self.code_list),n,prev_lines))
    # Emit only the line-ends still needed to satisfy the request.
    while prev_lines <= n:
        self.line_end()
        prev_lines += 1
    # Retain the intention for debugging.
    self.add_token('blank-lines',n)
#@+node:ekr.20150524075023.1: *6* clean
def clean(self,kind):
    '''Pop the last token of the code list if it has the given kind.'''
    if self.code_list[-1].kind == kind:
        self.code_list.pop()
#@+node:ekr.20150523085208.1: *6* lt.conditional_line_start
def conditional_line_start(self):
    '''Add a line-start token to the code list unless one is already last.'''
    # Consistency fix: every other method uses the token kind 'line-start'
    # (see blank() and line_start()); this method said 'start-line' in both
    # places, so the token it added was never recognized anywhere.
    prev = self.code_list[-1]
    if prev.kind != 'line-start':
        self.add_token('line-start')
#@+node:ekr.20150523131526.1: *6* lt.file_start & file_end
def file_end(self):
    '''
    Add a file-end token to the code list.
    Retain exactly one line-end token.
    '''
    # Pop all trailing line-end and blank-lines tokens...
    while self.code_list[-1].kind in ('blank-lines', 'line-end'):
        self.code_list.pop()
    # ...then add back exactly one line-end before the file-end sentinel.
    self.add_token('line-end')
    self.add_token('file-end')

def file_start(self):
    '''Add a file-start token to the code list.'''
    self.add_token('file-start')
#@+node:ekr.20150523084222.1: *6* lt.line_start & line_end
def line_end(self):
    '''Add a line-end request, suppressed immediately after file-start.'''
    if self.code_list[-1].kind != 'file-start':
        self.add_token('line-end', '\n')

def line_start(self):
    '''Add a line-start request carrying the current indentation string.'''
    if self.code_list[-1].kind != 'line-start':
        self.add_token('line-start', self.indent * self.level)
#@+node:ekr.20150523083627.1: *6* lt.lit*
def lit(self,s):
    '''Add a request for a literal to the code list.'''
    assert s and g.isString(s),repr(s)
    self.add_token('lit', s)

def lit_blank(self,s):
    '''Add a literal (removing any preceding blank), then a blank.'''
    self.clean('blank')
    self.lit(s)
    self.blank()

def lit_no_blanks(self,s):
    '''Add a literal *not* surrounded by blanks.'''
    self.clean('blank')
    self.add_token('lit-no-blanks', s)
#@+node:ekr.20150523083651.1: *6* lt.lt & rt
def lt(self,s):
    '''Add a left-bracket request to the code list.'''
    assert s in '([{',repr(s)
    self.add_token('lt', s)

def rt(self,s):
    '''Add a right-bracket request to the code list.'''
    assert s in ')]}',repr(s)
    last = self.code_list[-1]
    if last.kind == 'arg-end':
        # Remove any blank token *preceding* the arg-end token,
        # keeping the arg-end token itself in place.
        last = self.code_list.pop()
        self.clean('blank')
        self.code_list.append(last)
    else:
        self.clean('blank')
    self.add_token('rt', s)
#@+node:ekr.20150522212520.1: *6* lt.op
def op(self,s):
    '''Add an operator request to the code list: a literal surrounded by blanks.'''
    assert s and g.isString(s),repr(s)
    self.blank()
    self.lit(s)
    self.blank()
#@+node:ekr.20150523083952.1: *6* lt.word
def word(self,s):
    '''Add a word request to the code list: a word token surrounded by blanks.'''
    assert s and g.isString(s),repr(s)
    self.blank()
    self.add_token('word',s)
    self.blank()
#@+node:ekr.20150520173107.2: *5* lt.Entries
#@+node:ekr.20150520173107.4: *6* lt.format
def format (self,node):
    '''Format the node (or list of nodes) and its descendants.

    Returns the concatenation of all output tokens' strings.
    '''
    self.level = 0
    self.file_start()
    # Idiom fix: visit() returns None; the old code bound its result
    # to an unused local. The visitors emit tokens as a side effect.
    self.visit(node)
    self.file_end()
    return ''.join([z.to_string() for z in self.code_list])
#@+node:ekr.20150520173107.5: *6* lt.visit
def visit(self,node):
    '''Dispatch to the do_* visitor for an Ast node.

    Note: despite the original docstring, this returns None; the visitors
    emit tokens onto self.code_list as a side effect.
    '''
    trace = False and not g.unitTesting
    assert isinstance(node,ast.AST),node.__class__.__name__
    method_name = 'do_' + node.__class__.__name__
    method = getattr(self,method_name)
    if trace: g.trace(method_name)
    method(node)
#@+node:ekr.20150520173107.69: *5* lt.Utils
#@+node:ekr.20150520173107.70: *6* lt.kind
def kind(self,node):
    '''Return the name of node's class.'''
    cls = node.__class__
    return cls.__name__
#@+node:ekr.20150520173107.72: *6* lt.op_name
def op_name (self,node,strict=True):
    '''Return the print name of an operator node.

    Unknown operator classes yield '<ClassName>'.
    '''
    spellings = {
        # Binary operators.
        'Add': '+', 'BitAnd': '&', 'BitOr': '|', 'BitXor': '^',
        'Div': '/', 'FloorDiv': '//', 'LShift': '<<', 'Mod': '%',
        'Mult': '*', 'Pow': '**', 'RShift': '>>', 'Sub': '-',
        # Boolean operators.
        'And': 'and', 'Or': 'or',
        # Comparison operators.
        'Eq': '==', 'Gt': '>', 'GtE': '>=', 'In': 'in',
        'Is': 'is', 'IsNot': 'is not', 'Lt': '<', 'LtE': '<=',
        'NotEq': '!=', 'NotIn': 'not in',
        # Context operators.
        'AugLoad': '<AugLoad>', 'AugStore': '<AugStore>', 'Del': '<Del>',
        'Load': '<Load>', 'Param': '<Param>', 'Store': '<Store>',
        # Unary operators.
        'Invert': '~', 'Not': 'not', 'UAdd': '+', 'USub': '-',
    }
    name = spellings.get(self.kind(node), '<%s>' % node.__class__.__name__)
    if strict: assert name, self.kind(node)
    return name
#@+node:ekr.20150523083043.1: *5* lt.Visitors
#@+node:ekr.20150520173107.12: *6* lt.Expressions
#@+node:ekr.20150520173107.13: *7* lt.Expr
def do_Expr(self,node):
    '''An outer expression: must be indented.'''
    # Emitted as a full line: indentation, the expression, then a line end.
    self.line_start()
    self.visit(node.value)
    self.line_end()
#@+node:ekr.20150520173107.14: *7* lt.Expression
def do_Expression(self,node):
    '''An inner expression: do not indent.'''
    # Unlike do_Expr, there is no line_start here.
    self.visit(node.body)
    self.line_end()
#@+node:ekr.20150520173107.15: *7* lt.GeneratorExp
# GeneratorExp(expr elt, comprehension* generators)

def do_GeneratorExp(self,node):
    '''Format a generator expression: (<elt> for <generators>)'''
    self.lt('(')
    self.visit(node.elt)
    self.word('for')
    for gen in node.generators or []:
        self.visit(gen)
    self.rt(')')
#@+node:ekr.20150520173107.17: *6* lt.Operands
#@+node:ekr.20150520173107.18: *7* lt.arguments
# arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def do_arguments(self,node):
    '''Format the arguments node: plain args, defaults, *vararg, **kwarg.'''
    assert self.kind(node) == 'arguments',node
    self.in_arg_list = True
    # The last len(defaults) args carry defaults; the first n_plain do not.
    n_plain = len(node.args) - len(node.defaults)
    n_args = len(node.args)
    self.arg_start()
    for i in range(n_args):
        if i < n_plain:
            self.visit(node.args[i])
        else:
            self.visit(node.args[i])
            self.op('=')
            self.visit(node.defaults[i-n_plain])
        if i + 1 < n_args:
            self.lit_blank(',')
    if getattr(node,'vararg',None):
        if node.args:
            self.lit_blank(',')
        self.lit('*')
        # In Python 3 the name is wrapped in an ast.arg node; in Python 2 it is a string.
        name = getattr(node,'vararg')
        self.word(name.arg if g.isPython3 else name)
    if getattr(node,'kwarg',None):
        if node.args or getattr(node,'vararg',None):
            self.lit_blank(',')
        self.lit('**')
        name = getattr(node,'kwarg')
        self.word(name.arg if g.isPython3 else name)
    self.arg_end()
    self.in_arg_list = False
#@+node:ekr.20150520173107.19: *7* lt.arg (Python3 only)
# Python 3:
# arg = (identifier arg, expr? annotation)

def do_arg(self,node):
    '''Emit the argument's name as a word token. (Annotations are ignored.)'''
    self.word(node.arg)
#@+node:ekr.20150520173107.20: *7* lt.Attribute
# Attribute(expr value, identifier attr, expr_context ctx)

def do_Attribute(self,node):
    '''Format an attribute access: <value>.<attr>'''
    self.visit(node.value)
    self.lit_no_blanks('.')
    self.word(node.attr)
#@+node:ekr.20150520173107.21: *7* lt.Bytes
def do_Bytes(self,node): # Python 3.x only.
    '''Emit node.s (the bytes value) as a literal token.'''
    assert g.isPython3
    self.lit(node.s)
#@+node:ekr.20150520173107.22: *7* lt.Call & lt.keyword
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)

def do_Call(self,node):
    '''Format a call: <func>(args, keywords, *starargs, **kwargs)'''
    self.visit(node.func)
    self.lit_no_blanks('(')
    # Positional arguments, comma-separated.
    for i,z in enumerate(node.args):
        self.visit(z)
        if i + 1 < len(node.args):
            self.lit_blank(',')
    if node.args and node.keywords:
        self.lit_blank(',')
    # Keyword arguments.
    for i,z in enumerate(node.keywords):
        self.visit(z) # Calls f.do_keyword.
        if i + 1 < len(node.keywords):
            self.lit_blank(',')
    # starargs/kwargs exist only on older ASTs (removed in Python 3.5).
    if getattr(node,'starargs',None):
        if node.args or node.keywords:
            self.lit_blank(',')
        self.lit('*')
        self.visit(node.starargs)
    if getattr(node,'kwargs',None):
        if node.args or node.keywords or getattr(node,'starargs',None):
            self.lit_blank(',')
        self.lit('**')
        self.visit(node.kwargs)
    self.rt(')')
#@+node:ekr.20150520173107.23: *8* lt.keyword
# keyword = (identifier arg, expr value)

def do_keyword(self,node):
    '''Format one keyword argument: <arg>=<value>'''
    self.lit(node.arg)
        # node.arg is a string.
    self.lit('=')
    self.visit(node.value)
        # This is a keyword *arg*, not a Python keyword!
#@+node:ekr.20150520173107.24: *7* lt.comprehension
# comprehension (expr target, expr iter, expr* ifs)

def do_comprehension(self,node):
    '''Format one comprehension clause: <target> in <iter> [if <cond>]...'''
    self.visit(node.target)
    self.op('in')
    self.visit(node.iter)
    # Idiom fix: the old loop used enumerate() but never used the index.
    for z in node.ifs or []:
        self.word('if')
        self.visit(z)
#@+node:ekr.20150520173107.25: *7* lt.Dict
def do_Dict(self,node):
    '''Format a dict display: {key: value, ...}, one entry per line.'''
    self.lt('{')
    if node.keys:
        if len(node.keys) == len(node.values):
            # g.trace([(z.s,z.str_spelling) for z in node.keys])
            # g.trace([(z.s,z.str_spelling) for z in node.values])
            self.level += 1
            for i in range(len(node.keys)):
                self.visit(node.keys[i])
                self.lit(':')
                self.blank()
                self.visit(node.values[i])
                self.lit_blank(',')
                # Start a new line before every entry except the last.
                if i + 1 < len(node.keys):
                    self.line_start()
            self.level -= 1
        else:
            print('Error: f.Dict: len(keys) != len(values)\nkeys: %r\nvals: %r' % (
                node.keys,node.values))
    self.rt('}')
#@+node:ekr.20150520173107.26: *7* lt.Ellipsis
def do_Ellipsis(self,node):
    '''Emit an Ellipsis as the literal token '...'.'''
    self.lit('...')
#@+node:ekr.20150520173107.27: *7* lt.ExtSlice
def do_ExtSlice (self,node):
    '''Format an extended slice: <dim> : <dim> : ...'''
    dims = node.dims
    for i, dim in enumerate(dims):
        self.visit(dim)
        if i + 1 < len(dims):
            self.op(':')
#@+node:ekr.20150520173107.28: *7* lt.Index
def do_Index (self,node):
    '''Format a simple (non-slice) subscript index: just visit the value.'''
    self.visit(node.value)
#@+node:ekr.20150520173107.29: *7* lt.List
def do_List(self,node):
    '''Format a list display: [a, b, ...]'''
    # Not used: list context (node.ctx).
    self.lt('[')
    elts = node.elts or []
    for i, elt in enumerate(elts):
        self.visit(elt)
        if i + 1 < len(elts):
            self.lit_blank(',')
    self.rt(']')
#@+node:ekr.20150520173107.30: *7* lt.ListComp
def do_ListComp(self,node):
    '''Format a list comprehension: [<elt> for <generators>]'''
    self.lt('[')
    self.visit(node.elt)
    self.word('for')
    # Idiom fix: the old loop used enumerate() but never used the index.
    # NOTE(review): the original '### ?' marker likely flagged that a second
    # generator needs its own 'for' word, which is not emitted here;
    # behavior is preserved unchanged.
    for z in node.generators:
        self.visit(z)
    self.rt(']')
#@+node:ekr.20150520173107.31: *7* lt.Name
def do_Name(self,node):
    '''Emit an identifier as a word token.'''
    self.word(node.id)
#@+node:ekr.20150520182346.1: *7* lt.NameConstant
# Python 3 only.

def do_NameConstant(self,node):
    '''Emit True/False/None via str(node.value).'''
    self.lit(str(node.value))
#@+node:ekr.20150520173107.32: *7* lt.Num
def do_Num(self,node):
    '''Emit a numeric literal using repr(node.n).'''
    self.lit(repr(node.n))
#@+node:ekr.20150520173107.33: *7* lt.Repr
# Python 2.x only
def do_Repr(self,node):
    '''Format a backquote expression as a repr(...) call.'''
    self.word('repr')
    self.lt('(')
    self.visit(node.value)
    self.rt(')')
#@+node:ekr.20150520173107.34: *7* lt.Slice
def do_Slice (self,node):
    '''Format a slice: [lower]:[upper][:step]'''
    # g.trace(repr(node.lower),repr(node.upper),repr(node.step))
    if node.lower:
        self.visit(node.lower)
    self.op(':')
    if node.upper:
        self.visit(node.upper)
    if node.step:
        self.op(':')
        # Suppress an explicit trailing 'None' step name.
        if hasattr(node.step,'id') and node.step.id == 'None':
            pass
        else:
            self.visit(node.step)

    # if getattr(node,'lower',None) is not None:
        # self.visit(node.lower)
    # self.op(':')
    # if getattr(node,'upper',None) is not None:
        # self.visit(node.upper)
    # if getattr(node,'step',None) is not None:
        # if hasattr(node.step,'id'):
            # g.trace(node.step.id)
        # self.op(':')
        # g.trace(node.step)
        # self.visit(node.step) 
#@+node:ekr.20150520173107.35: *7* lt.Str
def do_Str (self,node):
    '''This represents a string constant.

    Emits node.str_spelling, the original source spelling injected
    by the AddTokensToTree pass.
    '''
    self.lit(node.str_spelling)
#@+node:ekr.20150520173107.36: *7* lt.Subscript
# Subscript(expr value, slice slice, expr_context ctx)

def do_Subscript(self,node):
    '''Format a subscript: <value>[<slice>]'''
    self.visit(node.value)
    self.lt('[')
    self.visit(node.slice)
    self.rt(']')
#@+node:ekr.20150520173107.37: *7* lt.Tuple
def do_Tuple(self,node):
    '''Format a tuple display, always parenthesized: (a, b, ...)'''
    self.lt('(')
    elts = node.elts
    for i, elt in enumerate(elts):
        self.visit(elt)
        if i + 1 < len(elts):
            self.lit_blank(',')
    self.rt(')')
#@+node:ekr.20150520173107.38: *6* lt.Operators
#@+node:ekr.20150520173107.39: *7* lt.BinOp
def do_BinOp (self,node):
    '''Format a binary expression, fully parenthesized: (<left> <op> <right>)'''
    self.lt('(')
    self.visit(node.left)
    operator = self.op_name(node.op)
    self.op(operator)
    self.visit(node.right)
    self.rt(')')
#@+node:ekr.20150526141653.1: *7* lt.Compare ops
# Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn
# Visitors for comparison-operator nodes: symbolic operators are emitted
# via op() (lit tokens); alphabetic ones via word() (word tokens).
 
def do_Eq   (self,node): self.op('==')
def do_Gt   (self,node): self.op('>')
def do_GtE  (self,node): self.op('>=')
def do_In   (self,node): self.word('in')
def do_Is   (self,node): self.word('is')
def do_IsNot(self,node): self.word('is not')
def do_Lt   (self,node): self.op('<')
def do_LtE  (self,node): self.op('<=')
def do_NotEq(self,node): self.op('!=')
def do_NotIn(self,node): self.word('not in')
#@+node:ekr.20150520173107.40: *7* lt.BoolOp
# BoolOp(boolop op, expr* values)

def do_BoolOp (self,node):
    '''Format a boolean expression, parenthesized: (<v1> and/or <v2> ...)'''
    name = self.op_name(node.op)
    self.lt('(')
    values = node.values
    for i, value in enumerate(values):
        self.visit(value)
        if i + 1 < len(values):
            self.op(name)
    self.rt(')')
#@+node:ekr.20150520173107.41: *7* lt.Compare
# Compare(expr left, cmpop* ops, expr* comparators)

def do_Compare(self,node):
    '''Format a comparison chain, parenthesized: (<left> <op1> <right1> ...)'''
    self.lt('(')
    self.visit(node.left)
    assert len(node.ops) == len(node.comparators)
    for op_node, comparator in zip(node.ops, node.comparators):
        self.visit(op_node)
        self.visit(comparator)
    self.rt(')')
#@+node:ekr.20150520173107.42: *7* lt.UnaryOp
# UnaryOp(unaryop op, expr operand)

def do_UnaryOp (self,node):
    '''Format a unary expression, parenthesized: (<op> <operand>)'''
    name = self.op_name(node.op)
    self.lt('(')
    # Alphabetic operators ('not') are words; symbols ('-', '~') are literals.
    if name.isalpha():
        self.word(name)
    else:
        self.lit(name)
    self.visit(node.operand)
    self.rt(')')
#@+node:ekr.20150520173107.43: *7* lt.ifExp (ternary operator)
def do_IfExp (self,node):
    '''Format a ternary expression: <body> if <test> else <orelse>'''
    self.visit(node.body)
    self.word('if')
    self.visit(node.test)
    self.blank()
    self.word('else')
    self.visit(node.orelse)
    self.blank()
#@+node:ekr.20150520173107.44: *6* lt.Statements
#@+node:ekr.20150520173107.45: *7* lt.Assert
def do_Assert(self,node):

    self.line_start()
    self.word('assert')
    self.visit(node.test)
    if getattr(node,'msg',None):
        self.lit_blank(',')
        self.visit(node.msg)
    self.line_end()
#@+node:ekr.20150520173107.46: *7* lt.Assign
def do_Assign(self,node):
    '''Format an assignment, including chained targets: a = b = <value>'''
    self.line_start()
    # Each target is followed by '=', so chains render naturally.
    for target in node.targets:
        self.visit(target)
        self.op('=')
    self.visit(node.value)
    self.line_end()
#@+node:ekr.20150520173107.47: *7* lt.AugAssign
def do_AugAssign(self,node):
    '''Format an augmented assignment: <target> <op>= <value>'''
    self.line_start()
    self.visit(node.target)
    operator = self.op_name(node.op)
    self.op('%s=' % operator)
    self.visit(node.value)
    self.line_end()
#@+node:ekr.20150520173107.48: *7* lt.Break
def do_Break(self,node):
    '''Format a break statement on its own line.'''
    self.line_start()
    self.word('break')
    self.line_end()
#@+node:ekr.20150520173107.7: *7* lt.ClassDef
# ClassDef(identifier name, expr* bases, stmt* body, expr* decorator_list)

def do_ClassDef (self,node):
    '''Format a class definition: decorators, bases, and an indented body.'''
    self.blank_lines(2)
    # Decorators, one per line.
    decorators = node.decorator_list
    if decorators:
        for i,z in enumerate(decorators):
            self.line_start()
            self.visit(z)
            self.line_end()
    self.line_start()
    self.word('class')
    self.word(node.name)
    # Emit '(bases)' only when bases exist.
    if node.bases:
        self.lt('(')
        for i,z in enumerate(node.bases):
            self.visit(z)
            if i + 1 < len(node.bases):
                self.lit_blank(',')
        self.rt(')')
    self.lit_no_blanks(':')
    self.line_end()
    # The body is indented one level.
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    self.blank_lines(2)
#@+node:ekr.20150520173107.49: *7* lt.Continue
def do_Continue(self,node):
    '''Format a continue statement on its own line.'''
    self.line_start()
    self.word('continue')
    self.line_end()
    
#@+node:ekr.20150520173107.50: *7* lt.Delete
def do_Delete(self,node):
    '''Format a del statement: del a, b, ...'''
    self.line_start()
    self.word('del')
    targets = node.targets or []
    for i, target in enumerate(targets):
        self.visit(target)
        if i + 1 < len(targets):
            self.lit_blank(',')
    self.line_end()
#@+node:ekr.20150520173107.51: *7* lt.ExceptHandler
def do_ExceptHandler(self,node):
    '''Format an except clause: except [<type> [as <name>]]: <body>'''
    # g.trace(node)
    self.line_start()
    self.word('except')
    if getattr(node,'type',None):
        self.blank()
        self.visit(node.type)
    if getattr(node,'name',None):
        self.word('as')
        # Python 2 stores the target as an AST node; Python 3 as a string.
        if isinstance(node.name,ast.AST):
            self.visit(node.name)
        else:
            self.word(node.name) # Python 3.x.
    self.lit_no_blanks(':')
    self.line_end()
    # The handler body is indented one level.
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
#@+node:ekr.20150520173107.52: *7* lt.Exec
# Exec(expr body, expr? globals, expr? locals)

# Python 2.x only
def do_Exec(self,node):
    '''Format an exec statement: exec <body> [in <globals>[, <locals>]]'''
    globals_ = getattr(node,'globals',None)
    locals_ = getattr(node,'locals',None)
    self.line_start()
    self.word('exec')
    self.visit(node.body)
    # Emit 'in' only when a namespace argument is present.
    if globals_ or locals_:
        self.word('in')
    if globals_:
        self.visit(node.globals)
    if locals_:
        if globals_:
            self.lit_blank(',')
        self.visit(node.locals)
    self.line_end()
#@+node:ekr.20150520173107.53: *7* lt.For
def do_For (self,node):
    '''Format a for statement: for <target> in <iter>: <body> [else: ...]'''
    self.line_start()
    self.word('for')
    self.visit(node.target)
    self.op('in')
    self.visit(node.iter)
    self.lit_no_blanks(':')
    self.line_end()
    # The body is indented one level.
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    # The optional else clause.
    if node.orelse:
        self.line_start()
        self.word('else')
        self.lit_no_blanks(':')
        self.line_end()
        for z in node.orelse:
            self.level += 1
            self.visit(z)
            self.level -= 1
#@+node:ekr.20150520173107.8: *7* lt.FunctionDef
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,node):
    '''Format a FunctionDef node: decorators, signature, and indented body.'''
    self.blank_lines(1)
    # Decorators, one per line.
    if node.decorator_list:
        for z in node.decorator_list:
            self.line_start()
            self.op('@')
            self.visit(z)
            self.line_end()
    self.line_start()
    self.word('def')
    self.word(node.name)
    self.lt('(')
    if node.args:
        self.visit(node.args)
    self.rt(')')
    self.lit_no_blanks(':')
    self.line_end()
    # The body is indented one level.
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    self.blank_lines(1)
#@+node:ekr.20150520173107.54: *7* lt.Global
def do_Global(self,node):
    '''Format a global statement: global a, b, ...'''
    self.line_start()
    self.word('global')
    names = node.names
    for i, name in enumerate(names):
        self.word(name)
        if i + 1 < len(names):
            self.lit_blank(',')
    self.line_end()
#@+node:ekr.20150520173107.55: *7* lt.If
def do_If (self,node):
    '''Format an if statement: if <test>: <body> [else: ...]'''
    self.line_start()
    self.word('if')
    self.visit(node.test)
    self.lit_no_blanks(':')
    self.line_end()
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    # NOTE(review): an 'elif' arrives as a single If inside node.orelse and
    # is rendered here as a nested 'else: if ...', not as 'elif'.
    if node.orelse:
        self.line_start()
        self.word('else')
        self.lit_no_blanks(':')
        self.line_end()
        for z in node.orelse:
            self.level += 1
            self.visit(z)
            self.level -= 1
#@+node:ekr.20150520173107.56: *7* lt.Import & helper
def do_Import(self,node):
    '''Format an import statement: import a [as b], ...'''
    self.line_start()
    self.word('import')
    aList = self.get_import_names(node)
    for i,data in enumerate(aList):
        fn,asname = data
        self.word(fn)
        if asname:
            # Consistency fix: emit 'as' as a word token, exactly as
            # do_ImportFrom does, rather than as an operator token.
            self.word('as')
            self.word(asname)
        if i + 1 < len(aList):
            self.lit_blank(',')
    self.line_end()
#@+node:ekr.20150520173107.57: *8* lt.get_import_names
def get_import_names (self,node):
    '''Return a list of (name, asname) pairs from node.names.'''
    result = []
    for alias in node.names:
        if self.kind(alias) == 'alias':
            result.append((alias.name, alias.asname))
        else:
            g.trace('unsupported kind in Import.names list',self.kind(alias))
    return result
#@+node:ekr.20150520173107.58: *7* lt.ImportFrom
def do_ImportFrom(self,node):
    '''Format a from-import: from <module> import a [as b], ...'''
    self.line_start()
    self.word('from')
    self.word(node.module)
    self.word('import')
    names = self.get_import_names(node)
    for i, (fn, asname) in enumerate(names):
        self.word(fn)
        if asname:
            self.word('as')
            self.word(asname)
        if i + 1 < len(names):
            self.lit_blank(',')
    self.line_end()
#@+node:ekr.20150520173107.11: *7* lt.Lambda
def do_Lambda (self,node):
    '''Format a lambda: lambda <args>: <body>'''
    ### self.conditional_line_start()
    self.word('lambda')
    if node.args:
        self.visit(node.args)
    self.lit_no_blanks(':')
    self.visit(node.body)
    # NOTE(review): emits a line-end although a lambda is an expression.
    self.line_end()
#@+node:ekr.20150520173107.10: *7* lt.Module
def do_Module (self,node):
    '''Visit all top-level statements of the module.'''
    for z in node.body:
        self.visit(z)
#@+node:ekr.20150520173107.59: *7* lt.Pass
def do_Pass(self,node):
    '''Format a pass statement on its own line.'''
    self.line_start()
    self.word('pass')
    self.line_end()
#@+node:ekr.20150520173107.60: *7* lt.Print (Still a problem)
# Python 2.x only
# Print(expr? dest, expr* values, bool nl)
def do_Print(self,node):
    '''Format a Python 2 print statement in print-function form.'''
    self.line_start()
    self.word('print')
    self.lt('(')
    for i,z in enumerate(node.values):
        if isinstance(z,ast.Tuple):
            # Bug fix: the old code compared the *outer* index i against
            # len(z.elts), so a comma was emitted after the last tuple
            # element too. Use the inner index for the inner separators.
            for j, z2 in enumerate(z.elts):
                self.visit(z2)
                if j + 1 < len(z.elts):
                    self.lit_blank(',')
        else:
            self.visit(z)
        if i + 1 < len(node.values):
            self.lit_blank(',')
    # node.dest and node.nl are not handled.
    # if getattr(node,'dest',None):
        # vals.append('dest=%s' % self.visit(node.dest))
    # if getattr(node,'nl',None):
        # vals.append('nl=%s' % node.nl)
    self.rt(')')
    self.line_end()
#@+node:ekr.20150520173107.61: *7* lt.Raise
def do_Raise(self,node):
    '''Format a raise statement (Python 2 form): raise [type[, inst[, tback]]]'''
    has_arg = False
    self.line_start()
    self.word('raise')
    # Emit only the arguments actually present, comma-separated.
    for attr in ('type','inst','tback'):
        if getattr(node,attr,None) is not None:
            if has_arg:
                self.lit_blank(',')
            self.visit(getattr(node,attr))
            has_arg = True
    self.line_end()
#@+node:ekr.20150520173107.62: *7* lt.Return
def do_Return(self,node):
    '''Format a return statement, with its optional value.'''
    self.line_start()
    self.word('return')
    if node.value:
        self.blank()
        self.visit(node.value)
    self.line_end()
#@+node:ekr.20150520202136.1: *7* lt.Try
# Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody)

def do_Try(self,node):
    '''Format a try statement (Python 3 AST): body, handlers, else, finally.'''
    self.line_start()
    self.word('try')
    self.lit_no_blanks(':')
    self.line_end()
    # The try body is indented one level.
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    # Each handler emits its own 'except' clause.
    if node.handlers:
        for z in node.handlers:
            self.visit(z)
    if node.orelse:
        self.line_start()
        self.word('else')
        self.lit_no_blanks(':')
        self.line_end()
        for z in node.orelse:
            self.level += 1
            self.visit(z)
            self.level -= 1
    if node.finalbody:
        self.line_start()
        self.word('finally')
        self.lit_no_blanks(':')
        self.line_end()
        for z in node.finalbody:
            self.level += 1
            self.visit(z)
            self.level -= 1
#@+node:ekr.20150520173107.64: *7* lt.TryExcept (Python 2)
# TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
# TryFinally(stmt* body, stmt* finalbody)

def do_TryExcept(self,node):
    '''Format a try/except statement (Python 2 AST).'''
    # g.trace(node)
    self.line_start()
    self.word('try:')
    self.line_end()
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    if node.handlers:
        for z in node.handlers:
            self.visit(z)
    if node.orelse:
        self.line_start()
        self.word('else:')
        self.line_end()
        for z in node.orelse:
            self.level += 1
            self.visit(z)
            self.level -= 1
    # Note: unlike do_Try, this emits an extra trailing line-end.
    self.line_end()
#@+node:ekr.20150520173107.65: *7* lt.TryFinally (Python 2)
# TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
# TryFinally(stmt* body, stmt* finalbody)

def do_TryFinally(self,node):
    '''Format a try/finally statement (Python 2 AST).'''
    # g.trace(node)
    self.line_start()
    self.word('try:')
    self.line_end()
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    self.line_start()
    self.word('finally:')
    self.line_end()
    for z in node.finalbody:
        self.level += 1
        self.visit(z)
        self.level -= 1
#@+node:ekr.20150520173107.66: *7* lt.While
def do_While (self,node):
    '''Format a while statement: while <test>: <body> [else: ...]'''
    self.line_start()
    self.word('while')
    self.visit(node.test)
    self.lit_no_blanks(':')
    self.line_end()
    # The body is indented one level.
    for z in node.body:
        self.level += 1
        self.visit(z)
        self.level -= 1
    # The optional else clause.
    if node.orelse:
        self.line_start()
        self.word('else')
        self.lit_no_blanks(':')
        self.line_end()
        for z in node.orelse:
            self.level += 1
            self.visit(z)
            self.level -= 1
#@+node:ekr.20150520173107.67: *7* lt.With
# With(expr context_expr, expr? optional_vars, stmt* body)

def do_With (self,node):
    '''Format a with statement: with <expr> [as <vars>]: <body>'''
    self.line_start()
    self.word('with')
    # Bug fix: the old code tested hasattr(node,'context_expression') and
    # then read node.context_expresssion (note the typo). Neither name
    # exists on ast.With (see the signature comment above: the attribute
    # is context_expr), so the context expression was always dropped.
    if getattr(node, 'context_expr', None) is not None:
        self.visit(node.context_expr)
    if getattr(node, 'optional_vars', None) is not None:
        # Bug fix: the old code never emitted the 'as' keyword.
        self.word('as')
        try:
            for i, z in enumerate(node.optional_vars):
                self.visit(z)
                if i + 1 < len(node.optional_vars):
                    self.lit_blank(',')
        except TypeError: # Not iterable: a single target node.
            self.visit(node.optional_vars)
    self.lit_no_blanks(':')
    self.line_end()
    self.level += 1
    for z in node.body:
        self.visit(z)
    self.level -= 1
    self.line_end()
#@+node:ekr.20150520173107.68: *7* lt.Yield
# Yield(expr? value)

def do_Yield(self,node):
    '''Format a yield statement, with or without a value.'''
    self.line_start()
    self.word('yield')
    # Bug fix: ast.Yield *always* has a 'value' attribute (it is None for a
    # bare 'yield'), so the old hasattr() test was always true and a bare
    # yield crashed in self.visit(None). Test the value itself instead.
    if getattr(node, 'value', None) is not None:
        self.blank()
        self.visit(node.value)
    self.line_end()
#@+node:ekr.20160119213532.1: *3* Bracket matching...
#@+node:ekr.20160121163900.1: *4* class g.MatchBrackets (new & slow)
class MatchBrackets:
    '''A class that matches brackets in body text, per self.language.'''
        
    @others
    
#@+node:ekr.20160121163900.2: *5* mb.ctor
def __init__(self, c, p, language):
    '''Ctor for MatchBrackets class.

    c: the commander; p: the position whose body text will be scanned;
    language: determines the comment delimiters used while scanning.
    '''
    self.c = c
    self.p = p.copy()
    self.language = language
    self.s = None # set by find_matching_bracket.
    # Constants...
    self.close_brackets = ")]}"
    self.open_brackets = "([{"
    self.brackets = self.open_brackets + self.close_brackets
    ### self.matching_brackets = self.close_brackets + self.open_brackets
    # Language dependent.
    d1, d2, d3 = g.set_delims_from_language(language)
    self.single_comment, self.start_comment, self.end_comment = d1, d2, d3
    # g.trace(repr(d1), repr(d2), repr(d3))
    # Data structures for scanning...
    self.brackets_dict = {}
        # Keys are indices into self.s.
        # Values are (ch1, ch2, i, j)
        # s[i] == ch. s[j] == ch2 if j is not None.
    self.comments_and_strings_list = []
        # Values are (start_index, end_index)
#@+node:ekr.20160121163900.3: *5* mb.find_matching_bracket
def find_matching_bracket(self, ch1, s, i):
    '''Find the bracket matching s[i] for self.language.

    Returns None when ch1 is not a bracket; otherwise returns the
    result of lookup_matching_bracket.
    '''
    self.s = s
    if ch1 not in self.brackets:
        return None
    # Scan forward from an open bracket, backward from a close bracket.
    self.forward = forward = ch1 in self.open_brackets
    d = {
        '{':'}', '[':']', '(':')',
        '}':'{', ']':'[', ')':'(',
    }
    target = d.get(ch1)
    if not target:
        return None
    # # Find the character matching the initial bracket.
    # for n in range(len(self.brackets)):
        # if ch1 == self.brackets[n]:
            # target = self.matching_brackets[n]
            # break
    # else:
        # return None
    # prescan's first argument is always the *open* bracket.
    if self.forward:
        self.prescan(ch1, s, target)
    else:
        self.prescan(target, s, ch1)
    return self.lookup_matching_bracket(ch1, target, i)
#@+node:ekr.20160121163900.4: *5* mb.oops
def oops(self, s):
    '''Report an error in the match-brackets command.'''
    color = 'red'
    g.es_print(s, color=color)
#@+node:ekr.20160121163900.5: *5* mb.run
def run(self):
    '''
    The driver for the MatchBrackets class: select from the bracket at
    the cursor to its matching bracket, or report an unmatched bracket.
    '''
    # A partial fix for bug 127: Bracket matching is buggy.
    w = self.c.frame.body.wrapper
    s = w.getAllText()
    ins = w.getInsertPoint()
    # The characters on either side of the insert point, if any.
    ch1 = s[ins - 1] if 0 <= ins - 1 < len(s) else ''
    ch2 = s[ins] if 0 <= ins < len(s) else ''
    # Prefer to match the character to the left of the cursor.
    if ch1 and ch1 in self.brackets:
        ch, index = ch1, max(0, ins - 1)
    elif ch2 and ch2 in self.brackets:
        ch, index = ch2, ins
    else:
        return
    index2 = self.find_matching_bracket(ch, s, index)
    if index2 is None:
        g.es("unmatched", repr(ch))
        return
    # Select from the leftmost bracket to just past the rightmost.
    if index2 < index:
        w.setSelectionRange(index2, index + 1, insert=index2)
    else:
        w.setSelectionRange(index, index2 + 1, insert=min(len(s), index2 + 1))
    w.see(index2)
#@+node:ekr.20160121163900.6: *5* mb.prescan & helpers
def prescan(self, ch1, s, target):
    '''
    Scan all of s, filling self.brackets_dict with matching-bracket data
    for the (ch1, target) pair and recording comment/string spans in
    self.comments_and_strings_list.  ch1 must be the open bracket and
    target the corresponding close bracket.
    '''
    d = self.brackets_dict
    i, level = 0, 0
        # NOTE(review): level is updated below but never read.
    stack = [] # Stack of indices of open brackets.
    while i < len(s):
        progress = i
        ch = s[i]
        # g.trace(repr(ch),'i',i)
        if ch in '"\'':
            # Scan to the end of the string.
            i1 = i
            i = self.scan_string(s, i)
            self.comments_and_strings_list.append((i1,i),)
        elif self.starts_comment(s, i):
            # Scan to the end of the comment.
            i1 = i
            i = self.scan_comment(s, i)
            self.comments_and_strings_list.append((i1,i),)
        elif ch == '/' and self.is_regex(s, i):
            i = self.scan_regex(s, i)
                # Searching within a regex is pointless.
        elif ch == ch1:
            # An opening bracket.
            level += 1
            stack.append(i)
            # The entry is completed when the matching close bracket is seen.
            d [i] = (ch1, target, i, None)
            i += 1
        elif ch == target:
            # A closing bracket.
            level -= 1
            if stack:
                # Update the matching dict entry.
                prev_i = stack.pop()
                data = d.get(prev_i)
                ch2, target2, junk_i2, junk_j2 = data
                assert ch2 == ch1, (ch1, ch2)
                assert target2 == target, (target, target2)
                assert junk_i2 == prev_i, (prev_i, junk_i2)
                assert junk_j2 is None, (junk_j2)
                # Create two entries in the brackets dict.
                d [prev_i] = (ch2, target2, prev_i, i)
                d [i] = (target, ch1, i, prev_i)
            else:
                self.oops('unmatched brackets: %s %s' % (ch1, target))
                # Record the unmatched close bracket anyway.
                d [i] = (target, ch1, i, None)
            i += 1
        else:
            # Any ordinary character.
            i += 1
        assert i > progress
    if stack:
        self.oops('unclosed brackets')
#@+node:ekr.20160121163900.7: *6* mb.is_regex
def is_regex(self, s, i):
    '''Return True if there is another slash on the line after s[i].'''
    assert s[i] == '/'
    next_slash = s.find('/', i + 1)
    next_newline = s.find('\n', i + 1)
    if next_slash == -1:
        return False
    return next_newline == -1 or next_slash < next_newline
#@+node:ekr.20160121163900.8: *6* mb.scan_comment
def scan_comment(self, s, i):
    '''
    Scan the comment starting at s[i].
    Return the index of the first character after the comment.
    '''
    if g.match(s, i, self.start_comment):
        # A block comment: scan past the end delimiter.
        # Bug fix: skip the *entire* start delimiter, not just one
        # character, so that e.g. "/*/" is not seen as a complete comment.
        i += len(self.start_comment)
        while i < len(s):
            if g.match(s, i, self.end_comment):
                return i + len(self.end_comment)
            i += 1
        self.oops('unmatched multiline comment')
        return i
    # A single-line comment: scan past the newline (or to end of text).
    while i < len(s):
        if s[i] == '\n':
            return i + 1
        i += 1
    return i
#@+node:ekr.20160121163900.9: *6* mb.scan_regex
def scan_regex(self, s, i):
    '''
    Scan the regex starting at s[i] (including perl substitutions).
    Return the index after the closing delimiter, or the scan position
    if no unescaped closing slash is found on the line.
    '''
    assert s[i] == '/'
    end = None
    j = i + 1
    while j < len(s) and s[j] != '\n':
        c = s[j]
        j += 1
        if c != '/':
            continue
        # Count the backslashes immediately preceding this slash.
        k, backslashes = j - 2, 0
        while k >= 0 and s[k] == '\\':
            backslashes += 1
            k -= 1
        if backslashes % 2:
            continue # An escaped slash.
        if self.language == 'perl' and end is None:
            # Perl allows two or three slashes; remember the first close
            # and keep scanning for one more.
            end = j
        else:
            end = j
            break
    if end is None:
        self.oops('unmatched regex delim')
        return j
    return end
#@+node:ekr.20160121163900.10: *6* mb.scan_string
def scan_string(self, s, i):
    '''
    Scan the string literal starting at s[i].
    Return the index of the next character after the string.
    '''
    quote = s[i]
    assert quote in "'\"", repr(quote)
    j = i + 1
    while j < len(s):
        c = s[j]
        j += 1
        if c != quote:
            continue
        # Count the backslashes immediately preceding the quote.
        k, backslashes = j - 2, 0
        while k >= 0 and s[k] == '\\':
            backslashes += 1
            k -= 1
        if backslashes % 2 == 0:
            return j # An unescaped closing quote.
    # Unterminated string: mirror the original off-the-end return.
    # (Reporting it is annoying when matching brackets on the fly.)
    return j + 1
#@+node:ekr.20160121163900.11: *6* mb.starts_comment
def starts_comment(self, s, i):
    '''
    Return True if s[i] starts a single-line or block comment.
    NOTE(review): callers rely only on truthiness; the value is the raw
    result of the boolean expression, not necessarily a bool.
    '''
    return (
        self.single_comment and g.match(s, i, self.single_comment)
        or
        self.start_comment and self.end_comment and 
        g.match(s, i, self.start_comment))
#@+node:ekr.20160121163900.12: *5* mb.lookup_matching_bracket
def lookup_matching_bracket(self, ch1, target, index):
    '''
    Return the index of the bracket matching ch1 at the given index into
    self.s, or None if prescan made no entry for that index.
    '''
    assert self.s[index] == ch1, (ch1, self.s[index])
    data = self.brackets_dict.get(index)
    if data is None:
        # To do: scan comments/strings.
        g.trace('can not happen: no brackets_dict entry', index)
        return None
    # Removed an unconditional debug g.trace that fired on every lookup.
    ch2, target2, i, j = data
    # Sanity checks on the data recorded by prescan.
    assert ch2 in (ch1, target), (ch2, ch1, target)
    if i is not None:
        assert self.s[i] in (ch1, target), self.s[i]
    if j is not None:
        assert self.s[j] in (ch1, target), self.s[j]
    return j

#@+node:ekr.20031218072017.3189: *4* g.skip_matching_python_delims
def skip_matching_python_delims(s, i, delim1, delim2, reverse=False):
    '''
    Skip from the opening delim to the matching delim2.
    Return the index of the matching ')', or -1
    Deprecated.  Use g.MatchBrackets class instead.
    '''
    level = 0; n = len(s)
    # g.trace('delim1/2',repr(delim1),repr(delim2),'i',i,'s[i]',repr(s[i]),'s',repr(s[i-5:i+5]))
    assert(g.match(s, i, delim1))
    if reverse:
        # Scan backwards for the delim2 that balances delim1.
        while i >= 0:
            ch = s[i]
            if ch == delim1:
                level += 1; i -= 1
            elif ch == delim2:
                level -= 1
                if level <= 0: return i
                i -= 1
            # Doesn't handle strings and comments properly...
            else: i -= 1
    else:
        # Scan forwards, skipping Python strings and # comments.
        while i < n:
            progress = i
            ch = s[i]
            if ch == delim1:
                level += 1; i += 1
            elif ch == delim2:
                level -= 1
                if level <= 0: return i
                i += 1
            elif ch == '\'' or ch == '"': i = g.skip_string(s, i, verbose=False)
            elif g.match(s, i, '#'): i = g.skip_to_end_of_line(s, i)
            else: i += 1
            # Guard against stalls in the g.skip_* helpers.
            if i == progress: return -1
    return -1
#@+node:ekr.20110916215321.6712: *4* g.skip_matching_c_delims
def skip_matching_c_delims(s, i, delim1, delim2, reverse=False):
    '''Skip from the opening delim to the matching delim2,
    honoring C/C++ strings and comments.

    Return the index of the matching ')', or -1
    '''
    level = 0
    assert(g.match(s, i, delim1))
    if reverse:
        # Reverse scanning is tricky.
        # This doesn't handle single-line comments properly.
        while i >= 0:
            progress = i
            ch = s[i]
            if ch == delim1:
                level += 1; i -= 1
            elif ch == delim2:
                level -= 1
                if level <= 0: return i - 1
                i -= 1
            elif ch in ('\'', '"'):
                # Scan backwards over a string, honoring escapes.
                i -= 1
                while i >= 0:
                    if s[i] == ch and not s[i - 1] == '\\':
                        i -= 1; break
                    else:
                        i -= 1
            elif g.match(s, i, '*/'):
                # Scan backwards over a block comment.
                # NOTE(review): i += 2 moves *forward* before the backward
                # scan below; looks odd — confirm this is intended.
                i += 2
                while i >= 0:
                    if g.match(s, i, '/*'):
                        i -= 2
                        break
                    else:
                        i -= 1
            else: i -= 1
            if i == progress:
                g.trace('oops: reverse')
                return -1
    else:
        while i < len(s):
            progress = i
            ch = s[i]
            # g.trace(i,repr(ch))
            if ch == delim1:
                level += 1; i += 1
            elif ch == delim2:
                level -= 1; i += 1
                if level <= 0: return i
            elif ch in ('\'', '"'):
                # Scan over a string, honoring escapes.
                i += 1
                while i < len(s):
                    if s[i] == ch and not s[i - 1] == '\\':
                        i += 1; break
                    else:
                        i += 1
            elif g.match(s, i, '//'):
                # Skip a single-line comment.
                i = g.skip_to_end_of_line(s, i + 2)
            elif g.match(s, i, '/*'):
                # Skip a block comment.
                i += 2
                while i < len(s):
                    if g.match(s, i, '*/'):
                        i += 2
                        break
                    else:
                        i += 1
            else: i += 1
            if i == progress:
                g.trace('oops')
                return -1
    g.trace('not found')
    return -1
#@+node:ekr.20150514063305.120: *4* selectToMatchingBracket (leoEditCommands) (no longer used)
# No longer used: use c.findMatchingBracket instead.
@cmd('select-to-matching-bracket')
def selectToMatchingBracket(self, event):
    '''Select the text between the bracket near the cursor and its match.'''
    c = self.c
    w = self.editWidget(event)
    if not w:
        return
    ins = w.getInsertPoint()
    s = w.getAllText()
    allBrackets = self.openBracketsList + self.closeBracketsList
    # Use the bracket at the cursor, else the one just before it.
    i = ins
    if i < len(s) and s[i] in allBrackets:
        ch = s[i]
    elif i > 0 and s[i - 1] in allBrackets:
        i -= 1
        ch = s[i]
    else:
        g.es('no bracket selected')
        return
    # Open brackets scan forward; close brackets scan backward.
    reverse = ch not in self.openBracketsList
    if reverse:
        d = dict(zip(self.closeBracketsList, self.openBracketsList))
    else:
        d = dict(zip(self.openBracketsList, self.closeBracketsList))
    delim2 = d.get(ch)
    # This should be generalized...
    language = g.findLanguageDirectives(c, c.p)
    if language in ('c', 'cpp', 'csharp'):
        j = g.skip_matching_c_delims(s, i, ch, delim2, reverse=reverse)
    else:
        j = g.skip_matching_python_delims(s, i, ch, delim2, reverse=reverse)
    if j not in (-1, i):
        if reverse:
            i += 1
            j += 1
        w.setSelectionRange(i, j, insert=j)
        w.see(j)
#@+node:ekr.20060627080947: *4* skip_matching_python_parens (no longer used)
def skip_matching_python_parens(s, i):
    '''
    Skip from the opening '(' at s[i] to the matching ')'.
    Return the index of the matching ')', or -1.
    Deprecated: no longer used in Leo's core.
    '''
    return skip_matching_python_delims(s, i, '(', ')')
#@+node:ekr.20170303112757.1: *3* c2py (obsolete) Convert C code to Python syntax
@first
@language python
@tabwidth -4

import string

#####
##### This script is obsolete. Use Leo's c-to-python command instead.
#####

@ When using c2py as a script to translate entire files, use convertCFileToPython().  When using c2py within Leo, use convertCurrentTree().

Please set user data in the << specifying user types >> section.
@c

<< what c2py does >>
<< theory of operation >>
<< specify user types >>
tabWidth = 4 # how many blanks in a tab.
printFlag = False
doLeoTranslations = True ; dontDoLeoTranslations = False
<< define testData >>
@others

gClassName = "" # The class name for the present function.  Used to modify ivars.
gIvars = [] # List of ivars to be converted to self.ivar

def test():
    '''Translate each of the built-in test strings, printing the results.'''
    global printFlag
    printFlag = True
    for s in testData:
        convertCStringToPython(s, doLeoTranslations)

def go():
    '''Run the c2py tests.'''
    test()

if __name__ == "__main__":
    # NOTE(review): runs speedTest rather than go(); presumably left over
    # from timing work — confirm intended.
    speedTest(2)
#@+node:ekr.20170303112757.2: *4* << what c2py does >>
@
c2py converts C or C++ text into python text. The conversion is not complete.
Nevertheless, c2py eliminates much of the tedious text manipulation that would
otherwise be required.

The following is a list of the translations performed by convertCodeList:

I.  Prepass

These translations happen before removing all curly braces.

Suppose we are translating:

    aTypeSpec aClass::aMethod(t1 v1,...,tn vn)
    {
        body
    }

1. Translates the function prototype, i.e., translates:

    aTypeSpec aClass::aMethod(t1 v1,...,tn vn)
to:
    def aMethod(v1,...vn):

As a special case, c2py translates:

    aTypeSpec aClass::aClass(t1 v1,...,tn vn)
to:
    aClass.__init__(t1 v1,...,tn vn)

Yes, I know, aClass.__init__ isn't proper Python, but retaining the class name is useful.

2. Let t denote any member of typeList or classList.

    a) Removes all casts of the form (t) or (t*) or (t**), etc.
    b) Converts t x, t *x, t **x, etc. to x.
    c) Converts x = new t(...) to x = t(...)
    d) For all i in ivarsDict[aClass] converts this -> i to self.i
    e) For all i in ivarsDict[aClass] converts i to self.i

3. Converts < < x > > = to @c.  This Leo-specific translation is not done when translating files.

II.  Main Pass

This pass does the following simple translations everywhere except in comments and strings.

Changes all -> to .
Changes all this.self to self (This corrects problems during the prepass.)
Removes all curly braces
Changes all #if to if
Changes all else if to elif
Changes all #else to else:
Changes all else to else:
Removes all #endif
Changes all && to and
Changes all || to or
Changes all TRUE to True
Changes all FALSE to False
Changes all NULL to None
Changes all this to self
Changes all @code to @c.  This Leo-specific translation is not done when translating files.

III.  Complex Pass

This pass attempts more complex translations.

Converts if ( x ) to if x:
Converts elif ( x ) to elif x:
Converts while ( x ) to while x:
Converts for ( x ; y ; z ) to for x SEMI y SEMI z:

IV.  Final Pass

This pass completes the translation.

Removes all semicolons.
Removes @c if it starts the text.  This Leo-specific translation is not done when translating files.
Removes all blank lines.
Removes excess whitespace from all lines, leaving leading whitespace unchanged.
Replaces C/C++ comments by Python comments.
Removes trailing whitespace from all lines.
#@+node:ekr.20170303112757.3: *4* << theory of operation >>
@ Strategy and Performance

c2py is straightforward. The speed of c2py is unimportant. We don't care about
the memory used because we translate only small pieces of text at a time.

We can do body[i:j] = x, regardless of len(x). We can also do del body[i:j] to
delete characters.

We scan repeatedly through the text. Using many passes greatly simplifies the
code and does not slow down c2py significantly.

No scans are done within strings or comments. The idiom to handle such scans is
the following:

def someScan(body):
    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif << found what we are looking for ? >> :
            << convert what we are looking for, setting i >>
        else: i += 1

That's about all there is to it.  The code was remarkably easy to write and seems clear to me.
#@+node:ekr.20170303112757.4: *4* << specify user types >>
@ Please change the following lists so they contain the types and classes used by your program.

c2py removes all type definitions correctly; it converts
    new aType(...)
to
    aType(...)
@c

classList = [
    "vnode", "tnode", "Commands",
    "wxString", "wxTreeCtrl", "wxTextCtrl", "wxSplitterWindow" ]

typeList = ["char", "void", "short", "long", "int", "double", "float"]

@ Please change ivarsDict so it represents the instance variables (ivars) used by your program's classes.

ivarsDict is a dictionary used to translate ivar i of class c to self.i.  It also translates this->i to self.i.
@c

ivarsDict = {
    "atFile": [ "mCommands", "mErrors", "mStructureErrors",
        "mTargetFileName", "mOutputFileName", "mOutputStream",
        "mStartSentinelComment", "mEndSentinelComment", "mRoot"],

    "vnode": ["mCommands", "mJoinList", "mIconVal", "mTreeID", "mT", "mStatusBits"],

    "tnode": ["mBodyString", "mBodyRTF", "mJoinHead", "mStatusBits", "mFileIndex",
        "mSelectionStart", "mSelectionLength", "mCloneIndex"],

    "LeoFrame": ["mNextFrame", "mPrevFrame", "mCommands"],

    "Commands": [
        # public
        "mCurrentVnode", "mLeoFrame", "mInhibitOnTreeChanged", "mMaxTnodeIndex",
        "mTreeCtrl", "mBodyCtrl", "mFirstWindowAndNeverSaved",
        #private
        "mTabWidth", "mChanged", "mOutlineExpansionLevel", "mUsingClipboard",
        "mFileName", "mMemoryInputStream", "mMemoryOutputStream", "mFileInputStream",
        "mInputFile", "mFileOutputStream", "mFileSize", "mTopVnode", "mTagList",
        "mMaxVnodeTag",
        "mUndoType", "mUndoVnode", "mUndoParent", "mUndoBack", "mUndoN",
        "mUndoDVnodes", "mUndoLastChild", "mUndoablyDeletedVnode" ]}
#@+node:ekr.20170303112757.5: *4* << define testData >>
testData = [ "\n@doc\n\
This is a doc part: format, whilest, {};->.\n\
<<\
section def>>=\n\
LeoFrame::LeoFrame(vnode *v, char *s, int i)\n\
{\n\
    // test ; {} /* */.\n\
    #if 0 //comment\n\
        if(gLeoFrameList)gLeoFrameList -> mPrevFrame = this ;\n\
        else\n\
            this -> mNextFrame = gLeoFrameList ;\n\
    #else\n\
        \n\
        vnode *v = new vnode(a,b);\n\
        Commands *commander = (Commands) NULL ; // after cast\n\
        this -> mPrevFrame = NULL ;\n\
    #endif\n\
    if (a==b)\n\
        a = 2;\n\
    else if (a ==c)\n\
        a = 3;\n\
    else return; \n\
    /* Block comment test:\n\
        if(2):while(1): end.*/\n\
    for(int i = 1; i < limit; ++i){\n\
        mVisible = FALSE ;\n\
        mOnTop = TRUE ;\n\
    }\n\
    // trailing ws.	 \n\
    mCommands = new Commands(this, mTreeCtrl, mTextCtrl) ;\n\
    gActiveFrame = this ;\n\
}\n\
    ", "<<" +
"vnode methods >>=\n\
\n\
void vnode::OnCopyNode(wxCommandEvent& WXUNUSED(event))\n\
{\n\
    mCommands -> copyOutline();\n\
}\n\
\n@doc\n\
another doc part if, then, else, -> \n<<" +
"vnode methods >>=\n\
void vnode::OnPasteNode(wxCommandEvent& WXUNUSED(event))\n\
{\n\
    mCommands -> pasteOutline();\n\
}\n" ]
#@+node:ekr.20170303112757.6: *4* speedTest
def speedTest(passes):
    '''
    Time a simple line-counting scan over LeoPy.leo, repeated `passes`
    times.  Benchmark scaffolding: the file path is hard-coded.
    '''
    import time
    # Local renamed from `file` so it doesn't shadow the builtin.
    path = r"c:\prog\LeoPy\LeoPy.leo"
    # Bug fix: open() raises on failure; it never returns a falsy value.
    try:
        with open(path) as f:
            s = f.read()
    except IOError:
        print("not found: ", path)
        return
    print("file:", path, " size:", len(s), " passes:", passes)
    print("speedTest start")
    # time.clock was removed in Python 3.8; use perf_counter.
    time1 = time.perf_counter()
    lines = 0
    for _ in range(passes):
        n = len(s)
        i = lines = 0
        while -1 < i < n:
            if s[i] == '\n':
                lines += 1
                i += 1
            else:
                i = s.find('\n', i) # _much_ faster than a char-by-char scan.
        # Removed unreachable code that followed a bare `continue`.
    time2 = time.perf_counter()
    print("lines:", lines)
    print("speedTest done:")
    print("elapsed time:", time2 - time1)
    print("time/pass:", (time2 - time1) / passes)
#@+node:ekr.20170303112757.7: *4* leo1to2
#@+node:ekr.20170303112757.8: *5* leo1to2
def leo1to2():
    '''Convert the currently-selected tree from Leo 1 to Leo 2 format.'''
    import leo
    import leoGlobals
    c = leoGlobals.top()
    convertLeo1to2(c.currentVnode(), c)
#@+node:ekr.20170303112757.9: *5* convertLeo1to2
def convertLeo1to2(v, c):
    '''
    Convert the body text of every node in v's tree from Leo 1 to
    Leo 2 format.  Fix: use Python 3 print calls (the original used
    Python 2 print statements).
    '''
    after = v.nodeAfterTree()
    while v and v != after:
        print("converting:", v.h)
        s = convertStringLeo1to2(v.b)
        c.setBodyString(v, s)
        v = v.threadNext()
    c.Repaint() # for backward compatibility
    print("end of leo1to2")
#@+node:ekr.20170303112757.10: *5* convertStringLeo1to2
def convertStringLeo1to2(s):
    '''
    Convert a Leo 1 body string s to Leo 2 format, alternating between
    code parts and doc parts.  Return the converted string.
    Fix: Python 3 print call; removed a needless `global` statement
    (printFlag is only read here).
    '''
    codeList = stringToList(s)
    outputList = []
    i = 0
    while i < len(codeList):
        j = skipCodePart(codeList, i)
        if j > i:
            code = codeList[i:j]
            convertCodeList1to2(code)
            i = j
            outputList.extend(code)
        j = skipDocPart(codeList, i)
        if j > i:
            doc = codeList[i:j]
            convertDocList(doc) # same as in c2py
            i = j
            outputList.extend(doc)
    result = listToString(outputList)
    if printFlag:
        print("-----:\n", result)
    return result
#@+node:ekr.20170303112757.11: *5* convertCodeList1to2
@ We do _not_ replace @root by @file or insert @others as needed.  Inserting @others can be done easily enough by hand, and may take more global knowledge than we can reasonably expect to have.
@c

def convertCodeList1to2(list):
    '''Convert one code part (a list of characters) from Leo 1 to Leo 2
    format, in place.  @root is deliberately NOT replaced by @file.'''
    # removeAtRoot(list)
    #   Disabled: there isn't much reason to do this.
    safeReplace(list, "@code", "@c")
    replaceSectionDefs(list)
    removeLeadingAtCode(list)
#@+node:ekr.20170303112757.12: *4* c2py entry points
@ We separate the processing into two parts,

1) a leo-aware driver that iterates over @file trees and
2) a text-based part that processes one or more files or strings.
#@+node:ekr.20170303112757.13: *5* convertCurrentTree
def convertCurrentTree():
    '''Convert the currently-selected tree from C to Python via c2py.'''
    import c2py
    import leo
    import leoGlobals
    c = leoGlobals.top()
    c2py.convertLeoTree(c.currentVnode(), c)
#@+node:ekr.20170303112757.14: *5* convertLeoTree
def convertLeoTree(v, c):
    '''
    Convert the body text of every node in v's tree from C to Python.
    Fix: Python 3 print calls (the original used Python 2 print
    statements).
    '''
    after = v.nodeAfterTree()
    while v and v != after:
        print("converting:", v.h)
        s = convertCStringToPython(v.b, doLeoTranslations)
        c.setBodyString(v, s)
        v = v.threadNext()
    c.Repaint() # for backward compatibility.
    print("end of c2py")
#@+node:ekr.20170303112757.15: *5* convertCFileToPython
def convertCFileToPython(file):
    '''
    Translate the C file at `file` to Python, writing the result to
    `file + ".py"`.
    Fixes: `if not f` was dead code (open() raises on failure, it never
    returns a falsy value); use context managers so both files are
    closed even on error.
    '''
    with open(file, 'r') as f:
        s = f.read()
    s = convertCStringToPython(s, dontDoLeoTranslations)
    with open(file + ".py", 'w') as f:
        f.write(s)
#@+node:ekr.20170303112757.16: *4* convertCStringToPython & helpers (top level)
def convertCStringToPython(s, leoFlag):
    '''
    Translate the C code in s to Python, returning the result string.
    When leoFlag is True, alternate between code parts and doc parts and
    translate Leo-specific directives as well.
    Fix: Python 3 print call; removed a needless `global` statement
    (printFlag is only read here).
    '''
    firstPart = True
    codeList = stringToList(s)
    if not leoFlag:
        convertCodeList(codeList, firstPart, dontDoLeoTranslations)
        return listToString(codeList)
    outputList = []
    i = 0
    while i < len(codeList):
        j = skipCodePart(codeList, i)
        if j > i:
            code = codeList[i:j]
            convertCodeList(code, firstPart, doLeoTranslations)
            i = j
            outputList.extend(code)
        firstPart = False # don't remove @c from here on.
        j = skipDocPart(codeList, i)
        if j > i:
            doc = codeList[i:j]
            convertDocList(doc)
            i = j
            outputList.extend(doc)
    result = listToString(outputList)
    if printFlag:
        print("-----:\n", result)
    return result
#@+node:ekr.20170303112757.17: *5* convertCodeList (main pattern function)
def convertCodeList(list, firstPart, leoFlag):
    '''
    Translate one code part (a list of characters) from C to Python,
    in place.  firstPart is True only for the first code part of the
    string; leoFlag enables Leo-specific translations.
    The passes must run in this order.
    '''
    # Prepass: must precede brace removal.
    replace(list, "\r", None)
    convertLeadingBlanks(list)
    if leoFlag:
        replaceSectionDefs(list)
    mungeAllFunctions(list)
    # Main pass: simple token-for-token replacements.
    safeReplace(list, " -> ", '.')
    safeReplace(list, "->", '.')
    safeReplace(list, " . ", '.')
    safeReplace(list, "this.self", "self")
    safeReplace(list, "{", None)
    safeReplace(list, "}", None)
    safeReplace(list, "#if", "if")
    safeReplace(list, "#else", "else")
    safeReplace(list, "#endif", None)
    safeReplace(list, "else if", "elif")
    safeReplace(list, "else", "else:")
    safeReplace(list, "&&", "and")
    safeReplace(list, "||", "or")
    safeReplace(list, "TRUE", "True")
    safeReplace(list, "FALSE", "False")
    safeReplace(list, "NULL", "None")
    safeReplace(list, "this", "self")
    safeReplace(list, "try", "try:")
    safeReplace(list, "catch", "except:")
    if leoFlag:
        safeReplace(list, "@code", "@c")
    # Complex pass.
    handleAllKeywords(list)
    # after processing for keywords
    removeSemicolonsAtEndOfLines(list)
    # Final pass.
    if firstPart and leoFlag: removeLeadingAtCode(list)
    removeBlankLines(list)
    removeExcessWs(list)
    # your taste may vary: in Python I don't like extra whitespace
    safeReplace(list, " :", ":") 
    safeReplace(list, ", ", ",")
    safeReplace(list, " ,", ",")
    safeReplace(list, " (", "(")
    safeReplace(list, "( ", "(")
    safeReplace(list, " )", ")")
    safeReplace(list, ") ", ")")
    replaceComments(list) # should follow all calls to safeReplace
    removeTrailingWs(list)
    safeReplace(list, "\t ", "\t") # happens when deleting declarations.
#@+node:ekr.20170303112757.18: *5* convertDocList
def convertDocList(docList):
    '''Convert a leading "@doc" directive in docList to "@ ", in place.'''
    if not matchWord(docList, 0, "@doc"):
        return
    i = skipWs(docList, 4)
    if match(docList, i, "\n"):
        i += 1
    docList[0:i] = list("@ ")
#@+node:ekr.20170303112757.19: *5* skipDocPart
def skipDocPart(list, i):
    '''Return the index after the doc part starting at list[i].'''
    while i < len(list):
        if matchWord(list, i, "@code") or matchWord(list, i, "@c"):
            return i
        if isSectionDef(list, i):
            return i
        i = skipPastLine(list, i)
    return i
#@+node:ekr.20170303112757.20: *5* skipCodePart
def skipCodePart(codeList, i):
    '''
    Return the index after the code part starting at codeList[i].
    A code part ends where a doc part ("@doc" or "@") begins at the
    start of a line; comments and strings are skipped.
    '''
    if matchWord(codeList, i, "@doc") or matchWord(codeList, i, "@"):
        return i
    while i < len(codeList):
        if match(codeList, i, "//"):
            i = skipPastLine(codeList,i)
        elif match(codeList, i, "/*"):
            i = skipCBlockComment(codeList,i)
        elif match(codeList, i, '"') or match(codeList, i, "'"):
            i = skipString(codeList,i)
        elif match(codeList, i, "\n"):
            i += 1
            # A doc part can only start at the beginning of a line.
            if matchWord(codeList, i, "@doc") or matchWord(codeList, i, "@"):
                break
        else: i += 1
    return i
#@+node:ekr.20170303112757.21: *4* Scanning & Replacing...
#@+node:ekr.20170303112757.22: *5* convertLeadingBlanks
def convertLeadingBlanks(list):
    '''
    Convert leading runs of tabWidth blanks to tabs, in place, on every
    line of list (a list of characters).
    '''
    global tabWidth
    if tabWidth < 2: return
    i = 0
    while i < len(list):
        n = 0 # Number of blanks seen since the last tab was emitted.
        while i < len(list) and list[i] == ' ':
            n += 1 ; i += 1
            if n == tabWidth:
                # Splice a single tab over the last tabWidth blanks.
                list[i-tabWidth:i] = ['\t']
                i = i - tabWidth + 1
                n = 0
        i = skipPastLine(list, i)
#@+node:ekr.20170303112757.23: *5* mungeAllFunctions
# We scan for a '{' at the top level that is preceded by ')'
# @code and < < x > > = have been replaced by @c
def mungeAllFunctions(codeList):
    '''
    Scan codeList (a list of characters) for C function definitions and
    convert each header to Python form, in place.  A candidate function
    is a '{' at the top level preceded by ')'.
    '''
    prevSemi = 0 # Previous semicolon: header contains all previous text
    i = 0
    firstOpen = None # Index of the first '(' since the scan restarted.
    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
            prevSemi = i
        elif match(codeList, i, '('):
            # NOTE(review): `not firstOpen` also matches index 0 —
            # confirm '(' can never appear at index 0.
            if not firstOpen:
                firstOpen = i
            i += 1
        elif match(codeList, i, '#'):
            # Preprocessor directives end the candidate header.
            i = skipPastLine(codeList, i)
            prevSemi = i
        elif match(codeList, i, ';'):
            i += 1
            prevSemi = i
        elif matchWord(codeList, i, "@code"):
            i += 5
            prevSemi = i # restart the scan
        elif matchWord(codeList, i, "@c"):
            i += 2 ; prevSemi = i # restart the scan
        elif match(codeList, i, "{"):
            i = handlePossibleFunctionHeader(codeList,i,prevSemi,firstOpen)
            prevSemi = i ; firstOpen = None # restart the scan
        else: i += 1
#@+node:ekr.20170303112757.24: *6* handlePossibleFunctionHeader
# converts function header lines from c++ format to python format.
# That is, converts
# x1..nn w::y ( t1 z1,..tn zn) {
# to
# def y (z1,..zn): {

def handlePossibleFunctionHeader(codeList, i, prevSemi, firstOpen):
    '''
    Having seen '{' at codeList[i], decide whether the preceding text
    (from prevSemi) is a C/C++ function header of the form
        x1..xn w::y ( t1 z1,..tn zn ) {
    If so, convert the head, args and body to Python form, in place.
    Return the index after the converted (or skipped) body.
    Fix: renamed the local `open` so it no longer shadows the builtin.
    '''
    assert(match(codeList,i,"{"))
    prevSemi = skipWsAndNl(codeList, prevSemi)
    close = prevNonWsOrNlChar(codeList, i)
    if close < 0 or codeList[close] != ')':
        # Not a function definition: just skip the block.
        return 1 + skipToMatchingBracket(codeList, i)
    if not firstOpen:
        return 1 + skipToMatchingBracket(codeList, i)
    close2 = skipToMatchingBracket(codeList, firstOpen)
    if close2 != close:
        return 1 + skipToMatchingBracket(codeList, i)
    open_i = firstOpen
    assert(codeList[open_i]=='(')
    head = codeList[prevSemi:open_i]
    # Do nothing if the head starts with a C keyword.
    k = skipWs(head,0)
    if k >= len(head) or not head[k] in string.letters:
        # NOTE(review): string.letters is Python 2 (ascii_letters in 3.x).
        return 1 + skipToMatchingBracket(codeList, i)
    kk = skipPastWord(head,k)
    if kk > k:
        headString = listToString(head[k:kk])
        # C keywords that might be followed by '{'
        if headString in [ "class", "do", "for", "if", "struct", "switch", "while"]:
            return 1 + skipToMatchingBracket(codeList, i)
    args = codeList[open_i:close+1]
    k = 1 + skipToMatchingBracket(codeList,i)
    body = codeList[i:k]
    head = massageFunctionHead(head)
    args = massageFunctionArgs(args)
    body = massageFunctionBody(body)
    # Splice the converted pieces over the original text.
    codeList[prevSemi:k] = head + args + body
    return k
#@+node:ekr.20170303112757.25: *6* massageFunctionArgs
def massageFunctionArgs(args):
    '''Convert a C argument list (chars from "(" through ")") to Python:
    keep only the last identifier of each declaration (the parameter
    name), drop the types, prepend "self," when the global gClassName is
    set, and append a trailing ":".
    '''
    global gClassName
    assert(args[0]=='(')
    assert(args[-1]==')')

    result = ['('] ; lastWord = []
    if gClassName:
        for item in list("self,"): result.append(item) #can put extra comma

    i = 1
    while i < len(args):
        i = skipWsAndNl(args, i)
        c = args[i]
        if c in string.letters:  # NOTE: string.letters is Python 2 only.
            j = skipPastWord(args,i)
            lastWord = args[i:j]
            i = j
        elif c == ',' or c == ')':
            # Emit the most recent identifier: it is the parameter name.
            for item in lastWord:
                result.append(item)
            if lastWord != [] and c == ',':
                result.append(',')
            lastWord = []
            i += 1
        else: i += 1
    if result[-1] == ',':
        # Drop a trailing comma (e.g. from the injected "self,").
        del result[-1]
    result.append(')')
    result.append(':')
    # print "new args:", listToString(result)
    return result
#@+node:ekr.20170303112757.26: *6* massageFunctionHead (sets gClassName)
def massageFunctionHead(head):
    '''Convert a C function head (return type and name, possibly
    "Class::name") to "def name", mapping Class::Class to __init__ and
    Class::~Class to __del__.

    Side effect: sets the global gClassName to the class name string when
    a "::" qualifier is seen; otherwise leaves it as [] (falsy).
    '''
    # print "head:", listToString(head)
    result = []
    prevWord = []
    global gClassName ; gClassName = []
    i = 0
    while i < len(head):
        i = skipWsAndNl(head, i)
        if i < len(head) and head[i] in string.letters:  # NOTE: Python 2 only.
            # Each new word replaces the previous one, so only the last
            # word (the function name) survives in result.
            result = []
            j = skipPastWord(head,i)
            prevWord = head[i:j]
            i = j
            # look for ::word2
            i = skipWs(head,i)
            if match(head,i,"::"):
                # Set the global to the class name.
                gClassName = listToString(prevWord)
                # print "class name:", gClassName
                i = skipWs(head, i+2)
                if i < len(head) and (head[i]=='~' or head[i] in string.letters):
                    j = skipPastWord(head,i)
                    if head[i:j] == prevWord:
                        # Class::Class is a constructor.
                        for item in list("__init__"): result.append(item)
                    elif head[i]=='~' and head[i+1:j] == prevWord:
                        # Class::~Class is a destructor.
                        for item in list("__del__"): result.append(item)
                    else:
                        # for item in "::": result.append(item)
                        for item in head[i:j]: result.append(item)
                    i = j
            else:
                for item in prevWord:result.append(item)
        else: i += 1

    finalResult = list("def ")
    for item in result: finalResult.append(item)
    # print "new head:", listToString(finalResult)
    return finalResult
#@+node:ekr.20170303112757.27: *6* massageFunctionBody
def massageFunctionBody(body):
    '''Apply all body transforms in order: prefix ivars with "self.",
    remove C casts, then remove type names.'''
    for transform in (massageIvars, removeCasts, removeTypeNames):
        body = transform(body)
    return body
#@+node:ekr.20170303112757.28: *7* massageIvars
def massageIvars(body):
    '''Prefix known instance variables in body (a list of chars) with "self.".

    Uses the globals gClassName and ivarsDict to decide which words are
    ivars of the current class. Returns the (mutated) list.
    '''
    # Bug/porting fix: dict.has_key was removed in Python 3;
    # the `in` operator is equivalent in both Python 2 and 3.
    if gClassName and gClassName in ivarsDict:
        ivars = ivarsDict [ gClassName ]
    else:
        ivars = []

    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif body[i] in string.letters:  # NOTE: string.letters is Python 2 only.
            j = skipPastWord(body,i)
            word = listToString(body[i:j])
            if word in ivars:
                # Replace word by self.word, then step past the replacement.
                word = "self." + word
                word = list(word)
                body[i:j] = word
                delta = len(word)-(j-i)
                i = j + delta
            else: i = j
        else: i += 1
    return body
#@+node:ekr.20170303112757.29: *7* removeCasts
def removeCasts(body):
    '''Remove C-style casts such as "(Type *)" from body (a list of chars).

    A cast is "(", a word found in the globals classList or typeList,
    zero or more "*"s, then ")". Returns the (mutated) list.
    '''
    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif match(body, i, '('):
            start = i
            i = skipWs(body, i+1)
            if body[i] in string.letters:  # NOTE: string.letters is Python 2 only.
                j = skipPastWord(body,i)
                word = listToString(body[i:j])
                i = j
                if word in classList or word in typeList:
                    i = skipWs(body, i)
                    while match(body,i,'*'):
                        i += 1
                    i = skipWs(body, i)
                    if match(body,i,')'):
                        i += 1
                        # print "removing cast:", listToString(body[start:i])
                        del body[start:i]
                        # Rescan from where the deleted cast began.
                        i = start
        else: i += 1
    return body
#@+node:ekr.20170303112757.30: *7* removeTypeNames
# Do _not_ remove type names when preceded by new.

def removeTypeNames(body):
    '''Remove type names (words in the globals classList or typeList,
    together with any trailing "*"s) from body, except when the word
    follows the "new" keyword.
    '''
    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif matchWord(body, i, "new"):
            i = skipPastWord(body,i)
            i = skipWs(body,i)
            # don't remove what follows new.
            if body[i] in string.letters:  # NOTE: string.letters is Python 2 only.
                i = skipPastWord(body,i)
        elif body[i] in string.letters:
            j = skipPastWord(body,i)
            word = listToString(body[i:j])
            if word in classList or word in typeList:
                # Extend the deletion over any trailing "*"s.
                k = skipWs(body, j)
                while match(body,k,'*'):
                    k += 1 ; j = k
                # print "Deleting type name:", listToString(body[i:j])
                del body[i:j]
            else:
                i = j
        else: i += 1
    return body
#@+node:ekr.20170303112757.31: *5* handleAllKeywords
# Converts "if ( x )" to "if x:", "while ( x )" to "while x:", etc.
def handleAllKeywords(codeList):
    '''Rewrite every if/while/for/elif statement in codeList via handleKeyword,
    skipping strings and comments.'''
    keywords = ("if", "while", "for", "elif")
    i = 0
    while i < len(codeList):
        if isStringOrComment(codeList, i):
            i = skipStringOrComment(codeList, i)
        elif any(matchWord(codeList, i, kw) for kw in keywords):
            i = handleKeyword(codeList, i)
        else:
            i += 1
#@+node:ekr.20170303112757.32: *6* handleKeyword
def handleKeyword(codeList,i):
    '''Rewrite one if/elif/while/for statement at index i: ensure a space
    follows the keyword, then replace the parenthesized condition's
    brackets with a trailing ":". Returns the index at which to resume
    scanning.
    '''
    # Step past the keyword itself.
    if (matchWord(codeList,i,"if")):
        i += 2
    elif (matchWord(codeList,i,"elif")):
        i += 4
    elif (matchWord(codeList,i,"while")):
        i += 5
    elif (matchWord(codeList,i,"for")):
        i += 3
    else: assert(0)
    # Removed the unused local 'isFor' that the original set for "for".
    # Make sure one space follows the keyword
    k = i
    i = skipWs(codeList,i)
    if k == i:
        c = codeList[i]
        codeList[i:i+1] = [ ' ', c ]
        i += 1
    # Remove '(' and matching ')' and add a ':'
    if codeList[i] == "(":
        j = removeMatchingBrackets(codeList,i)
        if j > i and j < len(codeList):
            c = codeList[j]
            codeList[j:j+1] = [":", " ", c]
            j = j + 2
        return j
    return i
#@+node:ekr.20170303112757.33: *5* isX...
#@+node:ekr.20170303112757.34: *6* isWs and isWOrNl
def isWs(c):
    '''True if c is a blank or a tab (newline excluded).'''
    return c in (' ', '\t')

def isWsOrNl(c):
    '''True if c is a blank, a tab, or a newline.'''
    return c in (' ', '\t', '\n')
#@+node:ekr.20170303112757.35: *6* isSectionDef
# returns the ending index if i points to < < x > > =
def isSectionDef(list, i):
    '''Return the index just past ">>=" when the line at i is a section
    definition ("<< name >>="); otherwise return False.

    NOTE: the return type is int-or-False; callers compare with > .
    The parameter shadows the builtin list(); kept for compatibility.
    '''
    i = skipWs(list,i)
    if not match(list,i,"<<"): return False
    # Only search to the end of the current line.
    while i < len(list) and list[i] != '\n':
        if match(list,i,">>="): return i+3
        else: i += 1
    return False
#@+node:ekr.20170303112757.36: *6* isStringOrComment
def isStringOrComment(list, i):
    '''Return a truthy match result when list[i] starts a string or a
    C comment ("//" or "/*"); otherwise False.'''
    for token in ("'", '"', "//", "/*"):
        found = match(list, i, token)
        if found:
            return found
    return False
#@+node:ekr.20170303112757.37: *5* find... & match...
#@+node:ekr.20170303112757.38: *6* findInCode
def findInCode(codeList, i, findStringOrList):
    '''Return the first index >= i where the target matches, skipping
    strings and comments; -1 if not found.'''
    target = stringToList(findStringOrList)
    while i < len(codeList):
        if isStringOrComment(codeList, i):
            i = skipStringOrComment(codeList, i)
            continue
        if match(codeList, i, target):
            return i
        i += 1
    return -1
#@+node:ekr.20170303112757.39: *6* findInList
def findInList(list, i, findStringOrList):
    '''Return the first index >= i where the target matches (strings and
    comments are NOT skipped); -1 if not found.'''
    target = stringToList(findStringOrList)
    while i < len(list):
        if match(list, i, target):
            return i
        i += 1
    return -1
#@+node:ekr.20170303112757.40: *6* match
# Matches a find-target against codeList starting at i.

def match (codeList, i, findStringOrList):
    '''Return i + len(target) when the target matches codeList at i,
    else False. An empty target never matches.

    (The original comment claimed this returns True; it actually returns
    the index just past the match, which callers rely on.)
    '''
    target = stringToList(findStringOrList)
    if not target:
        return False
    for offset, want in enumerate(target):
        k = i + offset
        if k >= len(codeList) or codeList[k] != want:
            return False
    return i + len(target)
#@+node:ekr.20170303112757.41: *6* matchWord
def matchWord (codeList, i, findStringOrList):
    '''Like match(), but additionally require that the match is not
    followed by an identifier character (letter, digit or underscore).

    Returns a bool (unlike match(), which returns the end index).
    '''
    j = match(codeList,i,findStringOrList)
    if not j:
        return False
    elif j >= len(codeList):
        # The match ends the list: nothing can follow it.
        return True
    else:
        c = codeList[j]
        # NOTE: string.letters/string.digits are Python 2 only.
        return not (c in string.letters or c in string.digits or c == '_')
#@+node:ekr.20170303112757.42: *5* remove...
#@+node:ekr.20170303112757.43: *6* removeAllCComments
def removeAllCComments(list, delim):
    '''Delete all C comments ("//..." and "/*...*/") from list (chars).

    delim is unused; kept for interface symmetry with the other
    removeAll* helpers. NOTE: Python 2 print statements below.
    '''
    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            i = skipString(list,i)
        elif match(list,i,"//"):
            j = skipPastLine(list,i)
            print "deleting single line comment:", listToString(list[i:j])
            del list[i:j]
        elif match(list,i,"/*"):
            j = skipCBlockComment(list,i)
            print "deleting block comment:", listToString(list[i:j])
            del list[i:j]
        else:
            i += 1
#@+node:ekr.20170303112757.44: *6* removeAllCSentinels
def removeAllCSentinels(list, delim):

    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            # string starts a line.
            i = skipString(list,i)
            i = skipPastLine(list,i)
        elif match(list,i,"/*"):
            # block comment starts a line
            i = skipCBlockComment(list,i)
            i = skipPastLine(line,i)
        elif match(list,i,"//@"):
            j = skipPastLine(list,i)
            print "deleting sentinel:", listToString(list[i:j])
            del list[i:j]
        else:
            i = skipPastLine(list,i)
#@+node:ekr.20170303112757.45: *6* removeAllPythonComments
def removeAllPythonComments(list, delim):
    '''Delete all Python "#..." comments from list (a list of chars).

    delim is unused; kept for interface symmetry with the other
    removeAll* helpers. NOTE: Python 2 print statement below.
    '''
    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            i = skipString(list,i)
        elif match(list,i,"#"):
            j = skipPastLine(list,i)
            print "deleting comment:", listToString(list[i:j])
            del list[i:j]
        else:
            i += 1
#@+node:ekr.20170303112757.46: *6* removeAllPythonSentinels
def removeAllPythonSentinels(list, delim):
    '''Delete every Leo sentinel line ("#@...") from list (a list of chars).

    Only the start of each line is examined. delim is unused.
    NOTE: Python 2 print statement below.
    '''
    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            # string starts a line.
            i = skipString(list,i)
            i = skipPastLine(list,i)
        elif match(list,i,"#@"):
            j = skipPastLine(list,i)
            print "deleting sentinel:", listToString(list[i:j])
            del list[i:j]
        else:
            i = skipPastLine(list,i)
#@+node:ekr.20170303112757.47: *6* removeAtRoot
def removeAtRoot(codeList):
    '''Delete every @root directive that starts a line of codeList.'''
    # Handle an @root at the very start of the text.
    i = skipWs(codeList, 0)
    if matchWord(codeList, i, "@root"):
        del codeList[i:skipPastLine(codeList, i)]
    # Handle @root at the start of every later line.
    while i < len(codeList):
        if isStringOrComment(codeList, i):
            i = skipStringOrComment(codeList, i)
            continue
        if not match(codeList, i, "\n"):
            i += 1
            continue
        i = skipWs(codeList, i + 1)
        if matchWord(codeList, i, "@root"):
            del codeList[i:skipPastLine(codeList, i)]
#@+node:ekr.20170303112757.48: *6* removeBlankLines
def removeBlankLines(codeList):
    '''Delete all whitespace-only lines from codeList (a list of chars),
    including each blank line's trailing newline.
    '''
    i = 0
    while i < len(codeList):
        # j scans past the leading blanks/tabs of the line starting at i.
        j = i
        while j < len(codeList) and (codeList[j]==" " or codeList[j]=="\t"):
            j += 1
        if j== len(codeList) or codeList[j] == '\n':
            # The line is blank: delete it together with its newline.
            del codeList[i:j+1]
        else:
            # Removed the unused local 'oldi' from the original.
            i = skipPastLine(codeList,i)
#@+node:ekr.20170303112757.49: *6* removeExcessWs
def removeExcessWs(codeList):
    '''Collapse whitespace runs via removeExcessWsFromLine for every line,
    leaving strings and comments untouched.'''
    i = removeExcessWsFromLine(codeList, 0)
    while i < len(codeList):
        if isStringOrComment(codeList, i):
            i = skipStringOrComment(codeList, i)
        elif match(codeList, i, '\n'):
            i = removeExcessWsFromLine(codeList, i + 1)
        else:
            i += 1
#@+node:ekr.20170303112757.50: *7* removeExessWsFromLine
def removeExcessWsFromLine(codeList,i):
    '''Collapse each interior run of blanks/tabs in the line starting at i
    to a single blank. Leading whitespace, strings and comments are left
    alone. Returns the index where scanning stopped.
    '''
    assert(i==0 or codeList[i-1] == '\n')
    i = skipWs(codeList,i)  # Retain the line's leading whitespace.
    while i < len(codeList):
        if isStringOrComment(codeList,i): break # safe
        elif match(codeList, i, '\n'): break
        elif match(codeList, i, ' ') or match(codeList, i, '\t'):
            # Replace all whitespace by one blank.
            k = i
            i = skipWs(codeList,i)
            codeList[k:i] = [' ']
            i = k + 1 # make sure we don't go past a newline!
        else: i += 1
    return i
#@+node:ekr.20170303112757.51: *6* removeLeadingAtCode
def removeLeadingAtCode(codeList):
    '''Remove a leading @code or @c directive (together with any leading
    whitespace/newlines and the whitespace following the directive) from
    codeList, in place.
    '''
    i = skipWsAndNl(codeList,0)
    if matchWord(codeList,i,"@code"):
        # Bug fix: skip from the end of the directive (i + 5), not from
        # the absolute offset 5, which was wrong whenever whitespace
        # preceded the directive.
        i = skipWsAndNl(codeList, i + 5)
        del codeList[0:i]
    elif matchWord(codeList,i,"@c"):
        # Bug fix: i + 2, not 2 (same reasoning as above).
        i = skipWsAndNl(codeList, i + 2)
        del codeList[0:i]
#@+node:ekr.20170303112757.52: *6* removeMatchingBrackets
def removeMatchingBrackets(codeList, i):
    '''Delete the bracket at i and its matching closer, keeping the text
    between them. Returns the index just past the formerly-enclosed text.
    '''
    j = skipToMatchingBracket(codeList, i)
    if j > i and j < len(codeList):
        # print "del brackets:", listToString(codeList[i:j+1])
        c = codeList[j]
        if c == ')' or c == ']' or c == '}':
            # Delete the closer first so the index i is still valid.
            del codeList[j:j+1]
            del codeList[i:i+1]
            # print "returning:", listToString(codeList[i:j])
            return j - 1
        else: return j + 1
    else: return j
#@+node:ekr.20170303112757.53: *6* removeSemicolonsAtEndOfLines
def removeSemicolonsAtEndOfLines(list):
    '''Delete each ";" that is followed only by whitespace, a comment, or
    the end of the line/list. Strings and comments are skipped.'''
    i = 0
    while i < len(list):
        if isStringOrComment(list, i):
            i = skipStringOrComment(list, i)
            continue
        if list[i] != ';':
            i += 1
            continue
        j = skipWs(list, i + 1)
        at_line_end = (
            j >= len(list) or match(list, j, '\n') or
            match(list, j, '#') or match(list, j, "//"))
        if at_line_end:
            del list[i]
        else:
            i += 1
#@+node:ekr.20170303112757.54: *6* removeTrailingWs
def removeTrailingWs(list):
    '''Delete whitespace runs that end a line (or end the list).'''
    i = 0
    while i < len(list):
        if not isWs(list[i]):
            i += 1
            continue
        start = i
        i = skipWs(list, start)
        assert(start < i)
        if i >= len(list) or list[i] == '\n':
            # The run is trailing whitespace: remove it and rescan.
            del list[start:i]
            i = start
#@+node:ekr.20170303112757.55: *5* replace... & safeReplace
#@+node:ekr.20170303112757.56: *6* replace
# Replaces all occurrences of findString by changeString.
# Deletes all occurrences when changeString is None.
def replace(codeList, findString, changeString):
    '''Replace every match of findString in codeList with changeString,
    in place. A falsy changeString deletes the matches.'''
    if len(findString) == 0:
        return
    target = stringToList(findString)
    replacement = stringToList(changeString)
    i = 0
    while i < len(codeList):
        if match(codeList, i, target):
            codeList[i:i + len(target)] = replacement
            i += len(replacement)
        else:
            i += 1
#@+node:ekr.20170303112757.57: *6* replaceComments
# For Leo we expect few block comments; doc parts are much more common.

def replaceComments(codeList):
    '''Convert C comments to Python comments in place: "//" becomes "#",
    and every continuation line of a "/*...*/" block gets a "# " prefix.
    '''
    i = 0
    if match(codeList, i, "//"):
        # A line comment at the very start of the text.
        codeList[0:2] = ['#']
    while i < len(codeList):
        if match(codeList, i, "//"):
            codeList[i:i+2] = ['#']
            i = skipPastLine(codeList,i)
        elif match(codeList, i, "/*"):
            j = skipCBlockComment(codeList,i)
            # Drop the trailing "*/" and replace the leading "/*" by "#".
            del codeList[j-2:j]
            codeList[i:i+2] = ['#']
            # delta tracks the net length change while prefixing lines.
            j -= 2 ; k = i ; delta = -1
            while k < j + delta :
                if codeList[k]=='\n':
                    codeList[k:k+1] = ['\n', '#', ' ']
                    delta += 2 ; k += 3 # progress!
                else: k += 1
            i = j + delta
        elif match(codeList, i, '"') or match(codeList, i, "'"):
            i = skipString(codeList,i)
        else: i += 1
#@+node:ekr.20170303112757.58: *6* replaceSectionDefs
# Replaces < < x > > = by @c (at the start of lines).
def replaceSectionDefs(codeList):
    '''Replace each section definition ("<< name >>=") that starts a line
    with the "@c " directive, in place.
    '''
    i = 0
    # Handle a section definition on the very first line.
    j = isSectionDef(codeList,i)
    if j > 0: codeList[i:j] = list("@c ")

    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif match(codeList,i,"\n"):
            i += 1
            j = isSectionDef(codeList,i)
            if j > i: codeList[i:j] = list("@c ")
        else: i += 1
#@+node:ekr.20170303112757.59: *6* safeReplace
# Replaces occurrences of findString by changeString outside of C comments and strings.
# Deletes all occurrences if changeString is None.
def safeReplace(codeList, findString, changeString):
    '''Like replace(), but skip strings and comments. Targets that begin
    with a letter are matched as whole words.
    '''
    if len(findString)==0: return
    findList = stringToList(findString)
    changeList = stringToList(changeString)
    i = 0
    # NOTE: string.letters is Python 2 only.
    if findList[0] in string.letters: #use matchWord
        while i < len(codeList):
            if isStringOrComment(codeList,i):
                i = skipStringOrComment(codeList,i)
            elif matchWord(codeList, i, findList):
                codeList[i:i+len(findList)] = changeList
                i += len(changeList)
            else: i += 1
    else: #use match
        while i < len(codeList):
            if match(codeList, i, findList):
                codeList[i:i+len(findList)] = changeList
                i += len(changeList)
            else: i += 1
#@+node:ekr.20170303112757.60: *5* skip... & prev...
#@+node:ekr.20170303112757.61: *6* prevNonWsChar and prevNonWsOrNlChar
def prevNonWsChar(list, i):
    '''Return the index of the nearest char before i that is not a blank
    or tab, or -1 when there is none.'''
    for k in range(i - 1, -1, -1):
        if not isWs(list[k]):
            return k
    return -1

def prevNonWsOrNlChar(list, i):
    '''Return the index of the nearest char before i that is not a blank,
    tab or newline, or -1 when there is none.'''
    for k in range(i - 1, -1, -1):
        if not isWsOrNl(list[k]):
            return k
    return -1
#@+node:ekr.20170303112757.62: *6* skipCBlockComment
def skipCBlockComment(codeList, i):
    '''Given i at "/*", return the index just past the closing "*/", or
    len(codeList) when the comment is unterminated.'''
    assert(match(codeList, i, "/*"))
    j = i + 2
    n = len(codeList)
    while j < n:
        if match(codeList, j, "*/"):
            return j + 2
        j += 1
    return j
#@+node:ekr.20170303112757.63: *6* skipPastLine
def skipPastLine(codeList, i):
    '''Return the index just past the next newline at or after i, or
    len(codeList) when no newline remains.'''
    n = len(codeList)
    while i < n and codeList[i] != '\n':
        i += 1
    # i is now at a newline or at the end; step over the newline if present.
    return i + 1 if i < n else i
#@+node:ekr.20170303112757.64: *6* skipPastWord
def skipPastWord(list, i):
    '''Return the index just past the identifier starting at i.

    A leading "~" is accepted so C++ destructor names scan as one word.
    NOTE: string.letters/string.digits are Python 2 only.
    '''
    assert(list[i] in string.letters or list[i]=='~')

    # Kludge: this helps recognize dtors.
    if list[i]=='~':
        i += 1

    while i < len(list) and (
        list[i] in string.letters or
        list[i] in string.digits or
        list[i]=='_'):
        i += 1
    return i
#@+node:ekr.20170303112757.65: *6* skipString
def skipString(codeList, i):
    '''Given i at a quote, return the index just past the matching close
    quote. Backslash escapes the following character. An unterminated
    string returns an index at (or just past) the end of the list.'''
    delim = codeList[i]  # Either single or double quote.
    assert(delim == '"' or delim == "'")
    i += 1
    n = len(codeList)
    while i < n:
        c = codeList[i]
        if c == delim:
            return i + 1
        # A backslash consumes the next character as well.
        i += 2 if c == '\\' else 1
    return i
#@+node:ekr.20170303112757.66: *6* skipStringOrComment
def skipStringOrComment(list, i):
    '''Skip past the string or C comment starting at i; i must start one
    (asserts otherwise).'''
    if match(list, i, "'") or match(list, i, '"'):
        return skipString(list, i)
    if match(list, i, "//"):
        return skipPastLine(list, i)
    if match(list, i, "/*"):
        return skipCBlockComment(list, i)
    assert(0)
#@+node:ekr.20170303112757.67: *6* skipToMatchingBracket
def skipToMatchingBracket(codeList, i):
    '''Given i at "(", "{" or "[", return the index of the matching
    closing bracket, skipping strings, comments and nested brackets.
    Returns len(codeList) when there is no match.
    '''
    c = codeList[i]
    if   c == '(': delim = ')'
    elif c == '{': delim = '}'
    elif c == '[': delim = ']'
    else: assert(0)

    i += 1
    while i < len(codeList):
        c = codeList[i]
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif c == delim:
            return i
        elif c == '(' or c == '[' or c == '{':
            # Recurse to handle nested brackets of any kind.
            i = skipToMatchingBracket(codeList,i)
            i += 1 # skip the closing bracket.
        else: i += 1
    return i
#@+node:ekr.20170303112757.68: *6* skipWs and skipWsAndNl
def skipWs(list, i):
    '''Return the index of the first char at or after i that is not a
    blank or tab (newlines stop the scan).'''
    while i < len(list) and list[i] in (' ', '\t'):
        i += 1
    return i

def skipWsAndNl(list, i):
    '''Return the index of the first char at or after i that is not a
    blank, tab or newline.'''
    while i < len(list) and list[i] in (' ', '\t', '\n'):
        i += 1
    return i
#@+node:ekr.20170303112757.69: *5* stringToList & listToString
#@+node:ekr.20170303112757.70: *6* stringToList
# Converts a string to a list with one item per character.
# A falsy argument (notably None or "") yields the empty list,
# because list(None) raises TypeError.
def stringToList(string):
    '''Return list(string), or [] when string is falsy (e.g. None).'''
    return list(string) if string else []
#@+node:ekr.20170303112757.71: *6* listToString
def listToString(list):
    '''Concatenate a list of one-character strings into a single string.'''
    # Porting fix: string.join(list, "") was removed in Python 3;
    # str.join is equivalent and works in both Python 2 and 3.
    return "".join(list)
#@+node:ekr.20150312225028.6: *3* class LogManager (not used yet)
class LogManager:

    '''A class to handle the global log, and especially
    switching the log from commander to commander.'''

    def __init__ (self):
        '''Ctor for LogManager: init tracing and all queueing state.'''
        trace = (False or g.trace_startup) and not g.unitTesting
        if trace: g.es_debug('(LogManager)')

        self.log = None             # The LeoFrame containing the present log.
        self.logInited = False      # False: all log message go to logWaiting list.
        self.logIsLocked = False    # True: no changes to log are allowed.
        self.logWaiting = []        # List of messages waiting to go to a log.
        self.printWaiting = []      # Queue of messages to be sent to the printer.
        self.signon_printed = False # True: the global signon has been printed.

    # @others is a Leo directive: the node's children (the methods) go here.
    @others
#@+node:ekr.20150312225028.7: *4* LogM.setLog, lockLog, unlocklog
def setLog (self,log):
    """Set the frame to which log messages will go, unless the log is locked."""
    if self.logIsLocked:
        return
    self.log = log

# The lock prevents setLog from switching the log frame.
def lockLog(self):
    """Disable changes to the log"""
    self.logIsLocked = True

def unlockLog(self):
    """Enable changes to the log"""
    self.logIsLocked = False
#@+node:ekr.20150312225028.8: *4* LogM.writeWaitingLog
def writeWaitingLog (self,c):
    '''Write all waiting lines to the log.

    c: the commander whose log pane receives the queued messages.
    Prints the queued print messages and sends the queued log messages
    (preceded by the signon lines) to the log, then clears both queues.
    '''
    trace = True
    lm = self
    if trace:
        # Do not call g.es, g.es_print, g.pr or g.trace here!
        print('** writeWaitingLog','silent',g.app.silentMode,c.shortFileName())
        # print('writeWaitingLog',g.callers())
        # import sys ; print('writeWaitingLog: argv',sys.argv)
    if not c or not c.exists:
        return
    if g.unitTesting:
        lm.printWaiting = []
        lm.logWaiting = []
        g.app.setLog(None) # Prepare to requeue for other commanders.
        return
    # Reversed below so these end up at the front of the log, in order.
    table = [
        ('Leo Log Window','red'),
        (g.app.signon,'black'),
        (g.app.signon2,'black'),
    ]
    table.reverse()
    c.setLog()
    lm.logInited = True # Prevent recursive call.
    if not lm.signon_printed:
        lm.signon_printed = True
        if not g.app.silentMode:
            print('')
            print('** isPython3: %s' % g.isPython3)
            if not g.enableDB:
                print('** caching disabled')
            print(g.app.signon)
            print(g.app.signon2)
    if not g.app.silentMode:
        for s in lm.printWaiting:
            print(s)
    lm.printWaiting = []
    if not g.app.silentMode:
        for s,color in table:
            lm.logWaiting.insert(0,(s+'\n',color),)
        for s,color in lm.logWaiting:
            g.es('',s,color=color,newline=0)
                # The caller must write the newlines.
    lm.logWaiting = []
    # Essential when opening multiple files...
    lm.setLog(None)
#@+node:ekr.20170123152147.1: *3* Commands and menus
#@+node:ekr.20170123152205.1: *4* Commands
#@+node:ekr.20150514063305.90: *5* advertizedUndo
@cmd('advertised-undo')
def advertizedUndo(self, event):
    '''Undo the previous command.

    The historical misspelling "advertized" in the method name is kept;
    the registered command name is the correctly spelled 'advertised-undo'.
    '''
    self.c.undoer.undo()
#@+node:ekr.20061031131434.115: *5* k.digitArgument & universalArgument
@cmd('universal-argument')
def universalArgument(self, event):
    '''Prompt for a universal argument.

    Shows a minibuffer prompt, then hands the event to
    k.universalDispatcher to collect the argument.
    '''
    k = self
    k.setLabelBlue('Universal Argument: ')
    k.universalDispatcher(event)

@cmd('digit-argument')
def digitArgument(self, event):
    '''Prompt for a digit argument (uses the same dispatcher as
    universal-argument).'''
    k = self
    k.setLabelBlue('Digit Argument: ')
    k.universalDispatcher(event)
#@+node:ekr.20061031131434.117: *5* k.negativeArgument (redo?)
@cmd('negative-argument')
def negativeArgument(self, event):
    '''Prompt for a negative digit argument.

    Not implemented: only traces. The commented-out code below sketches
    the intended state-machine implementation.
    '''
    g.trace('not ready yet')
    # k = self ; state = k.getState('neg-arg')
    # if state == 0:
        # k.setLabelBlue('Negative Argument: ')
        # k.setState('neg-arg',1,k.negativeArgument)
    # else:
        # k.clearState()
        # k.resetLabel()
        # func = k.negArgFunctions.get(k.stroke)
        # if func:
            # func(event)
#@+node:ekr.20061031131434.118: *5* k.numberCommand
@cmd('number-command')
def numberCommand(self, event, stroke, number):
    '''Enter a number prefix for commands.

    Records the stroke, starts the universal-argument dispatcher, then
    re-generates the digit as a key event so the dispatcher sees it.
    '''
    k = self; c = self.c
    k.stroke = stroke
    w = event and event.widget
    k.universalDispatcher(event)
    g.app.gui.event_generate(c, chr(number), chr(number), w)
    return

# The ten digit commands simply delegate to numberCommand with the digit.

@cmd('number-command-0')
def numberCommand0(self, event):
    '''Execute command number 0.'''
    return self.numberCommand(event, None, 0)

@cmd('number-command-1')
def numberCommand1(self, event):
    '''Execute command number 1.'''
    return self.numberCommand(event, None, 1)

@cmd('number-command-2')
def numberCommand2(self, event):
    '''Execute command number 2.'''
    return self.numberCommand(event, None, 2)

@cmd('number-command-3')
def numberCommand3(self, event):
    '''Execute command number 3.'''
    return self.numberCommand(event, None, 3)

@cmd('number-command-4')
def numberCommand4(self, event):
    '''Execute command number 4.'''
    return self.numberCommand(event, None, 4)

@cmd('number-command-5')
def numberCommand5(self, event):
    '''Execute command number 5.'''
    return self.numberCommand(event, None, 5)

@cmd('number-command-6')
def numberCommand6(self, event):
    '''Execute command number 6.'''
    return self.numberCommand(event, None, 6)

@cmd('number-command-7')
def numberCommand7(self, event):
    '''Execute command number 7.'''
    return self.numberCommand(event, None, 7)

@cmd('number-command-8')
def numberCommand8(self, event):
    '''Execute command number 8.'''
    return self.numberCommand(event, None, 8)

@cmd('number-command-9')
def numberCommand9(self, event):
    '''Execute command number 9.'''
    return self.numberCommand(event, None, 9)
#@+node:ekr.20170123145319.1: *4* Menus
#@+node:ekr.20170123145324.1: *5* Unused Chapters menu
#@+node:ekr.20170123145324.2: *6* @item chapter-&clone-node-to
#@+node:ekr.20170123145324.3: *6* @item chapter-c&opy-node-to
#@+node:ekr.20170123145324.4: *6* @item chapter-c&reate
#@+node:ekr.20170123145324.5: *6* @item chapter-&move-node-to
#@+node:ekr.20170123145324.6: *6* @item chapter-&remove
#@+node:ekr.20170123144320.1: *5* Unused abbrev Menu
#@+node:ekr.20170123144320.2: *6* @item &toggle-abbrev-mode
#@+node:ekr.20170123144320.3: *6* @item -
#@+node:ekr.20170123144320.4: *6* @item abbrev-&read
#@+node:ekr.20170123144320.5: *6* @item abbrev-&write
#@+node:ekr.20170123144320.6: *6* @item -
#@+node:ekr.20170123144320.7: *6* @item abbrev-&add-global
#@+node:ekr.20170123144320.8: *6* @item abbrev-&inverse-add-global
#@+node:ekr.20170123145642.1: *5* Unused Macros Menu
#@+node:ekr.20170123145629.1: *6* @menu &Macros
#@+node:ekr.20170123145629.2: *7* @item macro-start-&recording
#@+node:ekr.20170123145629.3: *7* @item macro-&end-recording
#@+node:ekr.20170123145629.4: *7* @item macro-&name-last
#@+node:ekr.20170123145629.5: *7* @item -
#@+node:ekr.20170123145629.6: *7* @item macro-&call
#@+node:ekr.20170123145629.7: *7* @item macro-call-&last
#@+node:ekr.20170123145629.8: *7* @item -
#@+node:ekr.20170123145629.9: *7* @item macro-&load-&all
#@+node:ekr.20170123145629.10: *7* @item macro-&print-all
#@+node:ekr.20170123145629.11: *7* @item macro-&save-all
#@+node:ekr.20150526115312.1: *3* compare_ast (can hang?)
# http://stackoverflow.com/questions/3312989/
# elegant-way-to-test-python-asts-for-equality-not-reference-or-object-identity

def compare_ast(node1, node2):
    '''Recursively compare two AST nodes (or node fields) for structural
    equality, ignoring lineno, col_offset and ctx. Returns a bool.'''
    trace = False and not g.unitTesting  # Short-circuits: g is never evaluated.

    def fail(node1, node2, tag):
        '''Report a failed mismatch in the beautifier. This is a bug.'''
        name1 = node1.__class__.__name__
        name2 = node2.__class__.__name__
        if name1 == 'str':
            print('compare_ast failed: %s: %s %s %r %r' % (
                tag, name1, name2, node1, node2))
        elif name1 == 'Str':
            print('compare_ast failed: %s: %s %s %r %r' % (
                tag, name1, name2, node1.s, node2.s))
        else:
            print('compare_ast failed: %s: %s %s\n%r\n%r' % (
                tag, name1, name2, node1, node2))

    # pylint: disable=unidiomatic-typecheck
    if type(node1) != type(node2):
        if trace: fail(node1, node2, 'type mismatch')
        return False
    # The types match. Recursively compare components.
    if isinstance(node1, ast.AST):
        for field, value1 in vars(node1).items():
            # Skip position attributes and expression contexts.
            if field in ('lineno', 'col_offset', 'ctx'):
                continue
            value2 = vars(node2).get(field) # Bug fix: 2016/05/16.
            if not compare_ast(value1, value2):
                if trace: fail(value1, value2, 'AST subnode mismatch')
                return False
        return True
    if isinstance(node1, list):
        if len(node1) != len(node2):
            if trace: fail(node1, node2, 'list len mismatch')
            return False
        for item1, item2 in zip(node1, node2):
            if not compare_ast(item1, item2):
                if trace: fail(node1, node2, 'list element mismatch')
                return False
        return True
    if node1 != node2:
        if trace: fail(node1, node2, 'node mismatch')
        return False
    return True
#@+node:ekr.20150312225028.9: *3* Existing autocompleter (keep)
#@+node:ekr.20150312225028.10: *4* completeSelf (not used yet)
def completeSelf (self):
    '''Set up autocompletion for "self": find or build an object that
    represents the enclosing class, then load its members list.
    '''
    g.trace(g.callers(4))

    # This scan will be fast if an instant object already exists.
    className,obj,p,s = self.classScanner.scan()
    # g.trace(className,obj,p,s and len(s))

    # First, look up the className.
    if not obj and className:
        obj = self.allClassesDict.get(className)
        # if obj: g.trace('found in allClassesDict: %s = %s' % (className,obj))

    # Second, create the object from class definition.
    if not obj and s:
        theClass = self.computeClassObjectFromString(className,s)
        if theClass:
            obj = self.createProxyObjectFromClass(className,theClass)
            if obj:
                self.selfObjectsDict [className] = obj
                # This prevents future rescanning, even if the node moves.
                self.selfVnodesDict [p.v] = obj
    if obj:
        self.selfClassName = className
        self.push(self.theObject)
        self.theObject = obj
        self.membersList = self.getMembersList(obj=obj)
    else:
        # No further action possible or desirable.
        self.selfClassName = None
        self.theObject = None
        self.clear()
        self.membersList = []
#@+node:ekr.20150312225028.11: *4* class ForgivingParserClass (not used)
class ForgivingParserClass:

    '''A class that creates valid class instances from a class definition
    that may contain syntax errors.

    It works by repeatedly compiling the class's script, excluding any
    node whose body causes a SyntaxError (see forgivingParser).'''

    @others
#@+node:ekr.20150312225028.12: *5* ctor (ForgivingParserClass)
def __init__ (self,c):
    '''Ctor for ForgivingParserClass.'''
    self.c = c
    # Vnodes excluded from putBody because their bodies have syntax errors.
    self.excludedTnodesList = []
    # The original c.atFileCommands.putBody.
    # Set in parse for communication with newPutBody.
    self.old_putBody = None
#@+node:ekr.20150312225028.13: *5* parse
def parse (self,p):
    '''The top-level parser method.

    Temporarily replace c.atFileCommands.putBody with self.newPutBody,
    run the forgiving parser, then restore the original putBody no
    matter what happened.  Return the parsed script (or None).'''
    c = self.c
    # Remember the real putBody so newPutBody can delegate to it.
    self.old_putBody = c.atFileCommands.putBody
    c.atFileCommands.putBody = self.newPutBody
    try:
        result = None
        result = self.forgivingParser(p)
    finally:
        # Always restore the original method.
        c.atFileCommands.putBody = self.old_putBody
    return result # Don't put a return in a finally clause.


#@+node:ekr.20150312225028.14: *5* forgivingParser (leoKeys)
def forgivingParser (self,p,suppress=False):
    '''Return the script for root p, repeatedly excluding nodes that
    cause syntax errors until the script compiles or no progress can
    be made.  Return '' on total failure.'''
    c = self.c ; root = p.copy()
    self.excludedTnodesList = []
    s = g.getScript(c,root,useSelectedText=False)
    while s:
        try:
            if not g.isPython3:
                s = g.toEncodedString(s)
            # Compile only: we want the SyntaxError location, not execution.
            compile(s+'\n','<string>','exec')
            break
        except SyntaxError:
            # Find the node containing the offending line.
            fileName, n = g.getLastTracebackFileAndLineNumber()
            p = self.computeErrorNode(c,root,n,lines=g.splitLines(s))
            if not p or p == root:
                if not suppress:
                    g.es_print('syntax error in class node: can not continue')
                s = None ; break
            else:
                # g.es_print('syntax error: deleting',p.h)
                # Exclude the offending node and regenerate the script.
                self.excludedTnodesList.append(p.v)
                s = g.getScript(c,root,useSelectedText=False)
        except Exception:
            g.trace('unexpected exception')
            g.es_exception()
            break
    return s or ''
#@+node:ekr.20150312225028.15: *5* computeErrorNode (leoKeys)
def computeErrorNode (self,c,root,n,lines):
    '''Return the position whose body contains script line n: the analog
    of c.goToLineNumber that applies to scripts.
    Unlike c.gotoLineNumberOpen, this function returns a position.

    NOTE(review): the names goToLineNumber and ignoreSentinels used
    below are not defined anywhere visible; this dead code would raise
    NameError if ever executed.'''

    if n == 1 or n >= len(lines):
        return root

    # vnodeName, junk, junk, junk, junk = c.convertLineToVnodeNameIndexLine(
        # lines, n, root, scriptFind = True)

    goto = goToLineNumber(c) # NOTE(review): goToLineNumber is undefined here.
    vnodeName,junk,junk,junk = goto.findVnode(
        root,lines,n,ignoreSentinels) # NOTE(review): ignoreSentinels is undefined.

    if vnodeName:
        # Find the node whose headline matches.
        for p in root.self_and_subtree():
            if p.matchHeadline(vnodeName):
                return p

    return None
#@+node:ekr.20150312225028.16: *5* newPutBody
def newPutBody (self,p,oneNodeOnly=False,fromString=''):
    '''Replacement for c.atFileCommands.putBody that silently skips any
    node previously excluded because of a syntax error.'''
    if p.v not in self.excludedTnodesList:
        self.old_putBody(p,oneNodeOnly,fromString)
    # else: g.trace('ignoring',p.h)
#@+node:ekr.20150312225028.17: *4* class ClassScannerClass (not used)
# Called by completeSelf, which is not used.

class ClassScannerClass:

    '''A class to find class definitions in a node or its parents.

    scan() is the main entry point.'''

    @others
#@+node:ekr.20150312225028.18: *5* ctor (classScannerClass)
def __init__ (self,c):
    '''Ctor for ClassScannerClass.'''
    self.c = c
    # Ignore @root for now:
    # self.start_in_doc = c.config.getBool('at_root_bodies_start_in_doc_mode')
    self.start_in_doc = False
#@+node:ekr.20150312225028.19: *5* scan
def scan (self):
    '''Find the class context for c.p.

    Return (className, obj, p, s), where s is the parsed script.
    s is computed only when a class node was found (p) but no cached
    object (obj) exists for it.'''
    c = self.c
    className, obj, p = self.findParentClass(c.p)
    # g.trace(className,obj,p)
    s = None
    if p and not obj:
        # Slow path: parse the class node's script.
        s = c.k.autoCompleter.forgivingParser.parse(p)
    return className, obj, p, s
#@+node:ekr.20150312225028.20: *5* findParentClass
def findParentClass (self,root):
    '''Return (className, obj, p) for the first class context found in
    root or one of its parents.  obj is a previously cached instance
    (className is then None); otherwise className comes from a slow
    scan of the node's body.'''
    autoCompleter = self.c.k.autoCompleter
    # First, see if any parent has already been scanned.
    for p in root.self_and_parents():
        cached = autoCompleter.selfVnodesDict.get(p.v)
        if cached:
            return None, cached, p
    # Next, do a much slower scan for a class definition.
    for p in root.self_and_parents():
        name = self.findClass(p)
        if name:
            return name, None, p
    return None, None, None
#@+node:ekr.20150312225028.21: *5* findClass & helpers
def findClass (self,p):
    '''Scan p.b for a class definition, skipping doc parts.
    Return the class name, or None if no class line is found.'''
    in_doc = self.start_in_doc
    # g.trace(p.h)
    for line in g.splitLines(p.b):
        if in_doc:
            if self.endsDoc(line):
                in_doc = False
        elif self.startsDoc(line):
            in_doc = True
        else:
            # Not a perfect scan: a triple-string could start with 'class',
            # but perfection is not important.
            name = self.startsClass(line)
            if name:
                return name
    return None
#@+node:ekr.20150312225028.22: *6* endsDoc
def endsDoc (self,s):
    '''True if line s terminates a doc part (an @c directive).'''
    prefix = '@c'
    return s.startswith(prefix)
#@+node:ekr.20150312225028.23: *6* startsClass
def startsClass (self,s):
    '''If line s starts a class definition, return the class name
    (possibly ''), else return None.'''
    if not s.startswith('class'):
        return None
    # Skip the keyword and whitespace, then collect the identifier.
    i = g.skip_ws(s, 5)
    j = g.skip_id(s, i)
    # g.trace(s[i:j])
    return s[i:j]
#@+node:ekr.20150312225028.24: *6* startsDoc
def startsDoc (self,s):
    '''Return True if line s starts a doc part: @doc, or bare @ followed
    by space/tab/newline, or @r (raw) abbreviations.'''
    # Idiom: str.startswith accepts a tuple of prefixes,
    # replacing the manual for/else loop.
    return s.startswith(('@doc', '@ ', '@\n', '@r', '@\t'))
#@+node:ekr.20150312225028.25: *4* Proxy classes and objects
#@+node:ekr.20150312225028.26: *5* createProxyObjectFromClass
def createProxyObjectFromClass (self,className,theClass):
    '''Create a dummy instance of theClass by temporarily replacing its
    __init__ with a no-op ctor, so the real (possibly dangerous) ctor
    never runs.  Return the new instance, or None on failure.'''
    # Calling the real ctor is way too dangerous, so install a no-op.
    def dummyCtor (self):
        pass

    obj = None
    # Bug fix: compute old_init *before* the try block so the finally
    # clause can never see an unbound name if instantiation fails.
    old_init = hasattr(theClass,'__init__') and theClass.__init__
    try:
        theClass.__init__ = dummyCtor
        obj = theClass()
    finally:
        # Always restore the class, whether or not instantiation worked.
        if old_init:
            theClass.__init__ = old_init
        else:
            delattr(theClass,'__init__')

    g.trace(type(theClass),obj)

    # Verify that it has all the proper attributes.
    # g.trace(g.listToString(dir(obj)))
    return obj
#@+node:ekr.20150312225028.27: *5* createClassObjectFromString
def computeClassObjectFromString (self,className,s):
    '''Execute the class definition in s and return the class object,
    or None if the class can not be created.'''
    # Bug fix: exec(s) + locals().get(className) does not work in
    # Python 3, because exec inside a function can not create new
    # locals.  Execute in an explicit namespace instead.
    namespace = {}
    try:
        # SECURITY: this executes arbitrary code from the outline.
        exec(s, namespace)
        return namespace.get(className)
    except Exception:
        # Could be a weird kind of user error.
        # Bug fix: the original message referenced the undefined name
        # computeProxyObject, raising NameError during error handling.
        g.es_print('unexpected exception in computeClassObjectFromString')
        g.es_exception()
        return None
#@+node:ekr.20150312225028.28: *4* getExternalCompletions
def getExternalCompletions (self,s,p=None,language='python'):

    '''Return the completions possible at the end of string 's'.
       Return (theObject,completions):
    - theObject is None unless the last character of 's' is a period.
    - completions is the list of valid completions.

    NOTE(review): leoFrame is not imported in this file's visible
    imports — confirm it is available where this ran.'''

    c = self.c ; k = c.k
    if not p: p = c.p

    # Use a separate widget containing just s.
    self.widget = w = leoFrame.stringTextWidget(c,'compute-completions-widget')
    w.setAllText(s)

    # Scan back for the first period.
    i = len(s)-1
    # while i > 0 and s[i] != '.':
    while i > 0 and g.isWordChar(s[i]):
        i -= 1
    if s[i] == '.': i += 1
    prefix = s[i:].strip()

    # Remember the prefix, but put the insert before the period.
    w.setSelectionRange(i, len(s)-1, insert=i)

    # Init the ivars...
    self.language = p and g.scanForAtLanguage(c,p) or language
    self.tabName = ''
    old_enable = c.k.enable_autocompleter

    # Get the completions.
    try:
        c.k.enable_autocompleter = True
        self.useTabs = False
        self.start(prefix=prefix)
    finally:
        # Always restore the user's autocompleter setting.
        c.k.enable_autocompleter = old_enable
        self.useTabs = True

    theObject,tabList = self.theObject,self.tabList
    self.exit() # Not called from the autocompleter itself.
    return theObject,tabList
#@+node:ekr.20170101133407.1: *3* Importers
#@+node:ekr.20170123041745.1: *4* COPY importers/basescanner.py
'''
Base classes for legacy (character-oriented) importers.

No importers presently use this file. It exists solely for
regression testing.
'''
<< basescanner imports >>
@others
@language python
@tabwidth -4
@pagewidth 60

#@+node:ekr.20170123041745.2: *5* << basescanner imports >>
import leo.core.leoGlobals as g
if g.isPython3:
    import io
    StringIO = io.StringIO
else:
    import StringIO
    StringIO = StringIO.StringIO
import re
# import time
#@+node:ekr.20170123041745.3: *5* class BaseScanner
class BaseScanner(object):
    '''The base class for all legacy (character-oriented) scanner classes.

    Per the module docstring, no importer presently uses this class;
    it exists solely for regression testing.'''

    @others
#@+node:ekr.20170123041745.4: *6* BaseScanner.ctor
def __init__(self, importCommands, atAuto, language='unnamed', alternate_language=None):
    '''ctor for BaseScanner.

    importCommands: supplies c, encoding, fileName, fileType,
                    methodName, rootLine, treeType, webType, trace.
    atAuto:         True when importing into an @auto tree.
    language:       used to compute comment delimiters.
    alternate_language: optional override for the generated @language line.
    '''
    ### New, to support IC.check
    self.gen_clean = True
    self.gen_refs = False
    self.name = language
    assert language
    self.ws_error = False
    ### End new
    ic = importCommands
    self.atAuto = atAuto
    self.c = c = ic.c
    # --- Configuration options.
    self.atAutoWarnsAboutLeadingWhitespace = c.config.getBool(
        'at_auto_warns_about_leading_whitespace')
    self.atAutoSeparateNonDefNodes = c.config.getBool(
        'at_auto_separate_non_DefNodes', default=False)
    # --- Per-run parsing state.
    self.classId = None
        # The identifier containing the class tag:
        # 'class', 'interface', 'namespace', etc.
    self.codeEnd = None
        # The character after the last character of the class, method or function.
        # An error will be given if this is not a newline.
    self.compare_tokens = True
    self.encoding = ic.encoding
    self.errors = 0
    ic.errors = 0
    self.errorLines = []
    self.escapeSectionRefs = True
    self.extraIdChars = ''
    self.fileName = ic.fileName
        # The original filename.
    self.fileType = ic.fileType
        # The extension,  '.py', '.c', etc.
    self.file_s = ''
        # The complete text to be parsed.
    self.fullChecks = c.config.getBool('full_import_checks')
    self.functionSpelling = 'function'
        # for error message.
    self.importCommands = ic
    self.indentRefFlag = None
        # None, True or False.
    self.isPrepass = False
        # True if we are running at-file-to-at-auto prepass.
    self.isRst = False
    self.language = language
        # The language used to set comment delims.
    self.lastParent = None
        # The last generated parent node (used only by RstScanner).
    self.methodName = ic.methodName
        # x, as in < < x methods > > =
    self.methodsSeen = False
    self.mismatchWarningGiven = False
    self.n_decls = 0
        # For headLineForNode only. The number of decls seen.
    self.output_newline = ic.output_newline
        # = c.config.getBool('output_newline')
    self.output_indent = 0
        # The minimum indentation presently in effect.
    self.root = None
        # The top-level node of the generated tree.
    self.rootLine = ic.rootLine
        # '' or @root + self.fileName
    self.sigEnd = None
        # The index of the end of the signature.
    self.sigId = None
        # The identifier contained in the signature,
        # that is, the function or method name.
    self.sigStart = None
        # The start of the line containing the signature.
        # An error will be given if something other
        # than whitespace precedes the signature.
    self.startSigIndent = None
    self.tab_width = None
        # Set in run: the tab width in effect in the c.currentPosition.
    self.tab_ws = ''
        # Set in run: the whitespace equivalent to one tab.
    self.trace = False or ic.trace
        # ic.trace is c.config.getBool('trace_import')
    self.treeType = ic.treeType
        # '@root' or '@file'
    self.webType = ic.webType
        # 'cweb' or 'noweb'
    # Compute language ivars.
    delim1, junk, junk = g.set_delims_from_language(language)
    self.comment_delim = delim1
    # --- Language-dependent settings.
    # May be overridden in subclasses...
    self.alternate_language = alternate_language
        # Optional: for @language.
    self.anonymousClasses = []
        # For Delphi Pascal interfaces.
    self.blockCommentDelim1 = None
    self.blockCommentDelim2 = None
    self.blockCommentDelim1_2 = None
    self.blockCommentDelim2_2 = None
    self.blockDelim1 = '{'
    self.blockDelim2 = '}'
    self.blockDelim2Cruft = []
        # Stuff that can follow .blockDelim2.
    self.caseInsensitive = False
    self.classTags = ['class',]
        # tags that start a tag.
    self.functionTags = []
    self.hasClasses = True
    self.hasDecls = True
    self.hasFunctions = True
    self.hasNestedClasses = False
    self.hasRegex = False
    self.ignoreBlankLines = False
    self.ignoreLeadingWs = False
    self.lineCommentDelim = None
    self.lineCommentDelim2 = None
    self.outerBlockDelim1 = None
    self.outerBlockDelim2 = None
    self.outerBlockEndsDecls = True
    self.sigHeadExtraTokens = []
        # Extra tokens valid in head of signature.
    self.sigFailTokens = []
        # A list of strings that abort a signature when seen in a tail.
        # For example, ';' and '=' in C.
    self.strict = False # True if leading whitespace is very significant.
    self.warnAboutUnderindentedLines = True
#@+node:ekr.20170123041745.5: *6* BaseScanner.Code generation
#@+node:ekr.20170123041745.6: *7* BaseScanner.adjustParent
def adjustParent(self, parent, headline):
    '''Return the effective parent node for a new definition.
    The base class keeps the parent unchanged; RstScanner overrides this.'''
    return parent
#@+node:ekr.20170123041745.7: *7* BaseScanner.addRef
def addRef(self, parent):
    '''Append an unindented @others (or, for @root trees, a named
    section reference) to the parent node's body.'''
    # rst outside @auto gets no reference at all.
    if self.isRst and not self.atAuto:
        return
    if self.treeType in ('@clean', '@file', '@nosent', None):
        self.appendStringToBody(parent, '@others\n')
    if self.treeType == '@root' and self.methodsSeen:
        section = g.angleBrackets(' ' + self.methodName + ' methods ')
        self.appendStringToBody(parent, section + '\n\n')
#@+node:ekr.20170123041745.8: *7* BaseScanner.appendStringToBody & setBodyString
def appendStringToBody(self, p, s):
    '''Append s to p's body via the import commander.
    Does not recolor the text or redraw the screen.'''
    ic = self.importCommands
    return ic.appendStringToBody(p, s)

def setBodyString(self, p, s):
    '''Set p's body to s via the import commander.
    Does not recolor the text or redraw the screen.'''
    ic = self.importCommands
    return ic.setBodyString(p, s)
#@+node:ekr.20170123041745.9: *7* BaseScanner.computeBody
def computeBody(self, s, start, sigStart, codeEnd):
    '''Return (body1, body2): the undented text before the signature and
    the undented signature-plus-code text.

    Also warns when the combined body does not end in a newline.'''
    trace = False
    body1 = s[start: sigStart]
    # Adjust start backwards to get a better undent.
    if body1.strip():
        while start > 0 and s[start - 1] in (' ', '\t'):
            start -= 1
    # g.trace(repr(s[sigStart:codeEnd]))
    body1 = self.undentBody(s[start: sigStart], ignoreComments=False)
    body2 = self.undentBody(s[sigStart: codeEnd])
    body = body1 + body2
    if trace: g.trace('body: %s' % repr(body))
    # tail: the trailing whitespace of the whole body.
    tail = body[len(body.rstrip()):]
    if '\n' not in tail:
        self.warning(
            '%s %s does not end with a newline; one will be added\n%s' % (
            self.functionSpelling, self.sigId, g.get_line(s, codeEnd)))
    return body1, body2
#@+node:ekr.20170123041745.10: *7* BaseScanner.createDeclsNode
def createDeclsNode(self, parent, s):
    '''Create a child node of parent holding the undented decls s.'''
    headline = '%s declarations' % self.methodName
    self.createHeadline(parent, self.undentBody(s), headline)
#@+node:ekr.20170123041745.11: *7* BaseScanner.createFunctionNode
def createFunctionNode(self, headline, body, parent):
    '''Create a node for one function.
    @root trees get a section-definition prefix line.'''
    if self.treeType == '@root':
        self.methodsSeen = True
        prefix = g.angleBrackets(' ' + headline + ' methods ') + '=\n\n'
        return self.createHeadline(parent, prefix + body, headline)
    return self.createHeadline(parent, body, headline)
#@+node:ekr.20170123041745.12: *7* BaseScanner.createHeadline
def createHeadline(self, parent, body, headline):
    '''Delegate node creation to the import commander.'''
    ic = self.importCommands
    return ic.createHeadline(parent, body, headline)
#@+node:ekr.20170123041745.13: *7* BaseScanner.endGen
def endGen(self, s):
    '''Do any language-specific post-processing.
    The base class does nothing.'''
    pass
#@+node:ekr.20170123041745.14: *7* BaseScanner.getLeadingIndent
def getLeadingIndent(self, s, i, ignoreComments=True):
    '''Return the leading-whitespace width of the line containing s[i].
    When ignoreComments is True, skip blank and comment lines and
    measure the first "real" line instead.'''
    i = g.find_line_start(s, i)
    if not ignoreComments:
        junk, width = g.skip_leading_ws_with_indent(s, i, self.tab_width)
        return width
    width = 0
    while i < len(s):
        j = g.skip_ws(s, i)
        if g.is_nl(s, j) or g.match(s, j, self.comment_delim):
            # Ignore blank lines and comment lines.
            i = g.skip_line(s, i)
        else:
            i, width = g.skip_leading_ws_with_indent(s, i, self.tab_width)
            break
    return width
#@+node:ekr.20170123041745.15: *7* BaseScanner.indentBody
def indentBody(self, s, lws=None):
    '''Prepend lws (default: one tab's worth of whitespace) to every
    non-blank line of s; blank lines become bare newlines.'''
    lws = lws or self.tab_ws
    out = []
    for ln in g.splitLines(s):
        if ln.strip():
            out.append(lws + ln)
        elif ln.endswith('\n'):
            out.append('\n')
    return ''.join(out)
#@+node:ekr.20170123041745.16: *7* BaseScanner.insertIgnoreDirective
def insertIgnoreDirective(self, parent):
    '''Append @ignore to parent's body and report the import problem:
    record a failure in unit tests, otherwise warn for @<file> nodes.

    NOTE(review): '@ignore' is appended without a trailing newline,
    unlike the other directives in this file — confirm intended.'''
    c = self.c
    self.appendStringToBody(parent, '@ignore')
    if g.unitTesting:
        g.app.unitTestDict['fail'] = g.callers()
    else:
        if parent.isAnyAtFileNode() and not parent.isAtAutoNode():
            g.warning('inserting @ignore')
            c.import_error_nodes.append(parent.h)
#@+node:ekr.20170123041745.17: *7* BaseScanner.putClass & helpers
def putClass(self, s, i, sigEnd, codeEnd, start, parent):
    '''Creates a child node c of parent for the class,
    and a child of c for each def in the class.

    s[start:sigEnd] holds the class signature; codeEnd bounds the class
    body.  Assumes self.classId and self.sigId were set by the scan.'''
    trace = False
    if trace:
        # g.trace('tab_width',self.tab_width)
        g.trace('sig', repr(s[i: sigEnd]))
    # Enter a new class 1: save the old class info.
    oldMethodName = self.methodName
    oldStartSigIndent = self.startSigIndent
    # Enter a new class 2: init the new class info.
    self.indentRefFlag = None
    class_kind = self.classId
    class_name = self.sigId
    headline = '%s %s' % (class_kind, class_name)
    headline = headline.strip()
    self.methodName = headline
    # Compute the starting lines of the class.
    prefix = self.createClassNodePrefix()
    if not self.sigId:
        g.trace('Can not happen: no sigId')
        self.sigId = 'Unknown class name'
    classHead = s[start: sigEnd]
    i = self.extendSignature(s, sigEnd)
    extend = s[sigEnd: i]
    if extend:
        classHead = classHead + extend
    # Create the class node.
    class_node = self.createHeadline(parent, '', headline)
    # Remember the indentation of the class line.
    undentVal = self.getLeadingIndent(classHead, 0)
    # Call the helper to parse the inner part of the class.
    putRef, bodyIndent, classDelim, decls, trailing = self.putClassHelper(
        s, i, codeEnd, class_node)
    # g.trace('bodyIndent',bodyIndent,'undentVal',undentVal)
    # Set the body of the class node.
    ref = putRef and self.getClassNodeRef(class_name) or ''
    if trace: g.trace('undentVal', undentVal, 'bodyIndent', bodyIndent)
    # Give ref the same indentation as the body of the class.
    if ref:
        bodyWs = g.computeLeadingWhitespace(bodyIndent, self.tab_width)
        ref = '%s%s' % (bodyWs, ref)
    # Remove the leading whitespace.
    result = (
        prefix +
        self.undentBy(classHead, undentVal) +
        self.undentBy(classDelim, undentVal) +
        self.undentBy(decls, undentVal) +
        self.undentBy(ref, undentVal) +
        self.undentBy(trailing, undentVal))
    result = self.adjust_class_ref(result)
    # Append the result to the class node.
    self.appendTextToClassNode(class_node, result)
    # Exit the new class: restore the previous class info.
    self.methodName = oldMethodName
    self.startSigIndent = oldStartSigIndent
#@+node:ekr.20170123041745.18: *8* BaseScanner.adjust_class_ref
def adjust_class_ref(self, s):
    '''Return s unchanged; the xml and html scanners override this.'''
    return s
#@+node:ekr.20170123041745.19: *8* BaseScanner.appendTextToClassNode
def appendTextToClassNode(self, class_node, s):
    '''Append s to the class node's body.'''
    self.appendStringToBody(class_node, s)
#@+node:ekr.20170123041745.20: *8* BaseScanner.createClassNodePrefix
def createClassNodePrefix(self):
    '''Return the class node's body prefix: a section-definition line
    for @root trees, otherwise the empty string.'''
    if self.treeType != '@root':
        return ''
    self.methodsSeen = True
    return g.angleBrackets(' ' + self.methodName + ' methods ') + '=\n\n'
#@+node:ekr.20170123041745.21: *8* BaseScanner.getClassNodeRef
def getClassNodeRef(self, class_name):
    '''Return the reference line for the class node's body:
    @others for derived-file trees, a named section for @root trees.'''
    if self.treeType in ('@clean', '@file', '@nosent', None):
        ref = '@others'
    else:
        ref = g.angleBrackets(' class %s methods ' % (class_name))
    return ref + '\n'
#@+node:ekr.20170123041745.22: *8* BaseScanner.putClassHelper
def putClassHelper(self, s, i, end, class_node):
    '''s contains the body of a class, not including the signature.

    Parse s for inner methods and classes, and create nodes.

    Return (putRef, bodyIndent, classDelim, decls, trailing).'''
    trace = False and not g.unitTesting
    # Increase the output indentation (used only in startsHelper).
    # This allows us to detect over-indented classes and functions.
    old_output_indent = self.output_indent
    self.output_indent += abs(self.tab_width)
    # Parse the decls.
    if self.hasDecls: # 2011/11/11
        j = i; i = self.skipDecls(s, i, end, inClass=True)
        decls = s[j: i]
    else:
        decls = ''
    # Set the body indent if there are real decls.
    bodyIndent = decls.strip() and self.getIndent(s, i) or None
    if trace: g.trace('bodyIndent', bodyIndent)
    # Parse the rest of the class.
    delim1, delim2 = self.outerBlockDelim1, self.outerBlockDelim2
    if g.match(s, i, delim1):
        # Brace-delimited class body: keep the opening delimiter text.
        # Do *not* use g.skip_ws_and_nl here!
        j = g.skip_ws(s, i + len(delim1))
        if g.is_nl(s, j): j = g.skip_nl(s, j)
        classDelim = s[i: j]
        end2 = self.skipBlock(s, i, delim1=delim1, delim2=delim2)
        start, putRef, bodyIndent2 = self.scanHelper(s, j, end=end2, parent=class_node, kind='class')
    else:
        classDelim = ''
        start, putRef, bodyIndent2 = self.scanHelper(s, i, end=end, parent=class_node, kind='class')
    if bodyIndent is None: bodyIndent = bodyIndent2
    # Restore the output indentation.
    self.output_indent = old_output_indent
    # Return the results.
    trailing = s[start: end]
    return putRef, bodyIndent, classDelim, decls, trailing
#@+node:ekr.20170123041745.23: *7* BaseScanner.putFunction
def putFunction(self, s, sigStart, codeEnd, start, parent):
    '''Create a node of parent for a function definition.

    s[start:codeEnd] is the function's text; sigStart marks the
    signature.  Assumes self.sigId was set by the scan.'''
    trace = False and not g.unitTesting
    verbose = True
    # Enter a new function: save the old function info.
    oldStartSigIndent = self.startSigIndent
    if self.sigId:
        headline = self.sigId
    else:
        g.trace('Can not happen: no sigId')
        headline = 'unknown function'
    body1, body2 = self.computeBody(s, start, sigStart, codeEnd)
    body = body1 + body2
    parent = self.adjustParent(parent, headline)
    if trace:
        # pylint: disable=maybe-no-member
        g.trace('parent', parent and parent.h)
        if verbose:
            # g.trace('**body1...\n',body1)
            g.trace(self.atAutoSeparateNonDefNodes)
            g.trace('**body...\n%s' % body)
    # 2010/11/04: Fix wishlist bug 670744.
    # Optionally put non-def code preceding the def in its own node.
    if self.atAutoSeparateNonDefNodes:
        if body1.strip():
            if trace: g.trace('head', body1)
            line1 = g.splitLines(body1.lstrip())[0]
            line1 = line1.strip() or 'non-def code'
            self.createFunctionNode(line1, body1, parent)
            body = body2
    self.lastParent = self.createFunctionNode(headline, body, parent)
    # Exit the function: restore the function info.
    self.startSigIndent = oldStartSigIndent
#@+node:ekr.20170123041745.24: *7* BaseScanner.putRootText
def putRootText(self, p):
    '''Append the rootLine plus @language and @tabwidth directives to p.'''
    language = self.alternate_language or self.language
    text = '%s@language %s\n@tabwidth %d\n' % (self.rootLine, language, self.tab_width)
    self.appendStringToBody(p, text)
#@+node:ekr.20170123041745.25: *7* BaseScanner.undentBody & helper
def undentBody(self, s, ignoreComments=True):
    '''Remove the first line's leading indentation from all lines of s.
    rst text is returned unchanged.'''
    if self.isRst:
        return s # Never unindent rst code.
    # Calculate the amount to be removed from each line.
    amount = self.getLeadingIndent(s, 0, ignoreComments=ignoreComments)
    if not amount:
        return s
    return self.undentBy(s, amount)
#@+node:ekr.20170123041745.26: *8* BaseScanner.undentBy
def undentBy(self, s, undentVal):
    '''Remove leading whitespace equivalent to undentVal from each line.
    For strict languages, add an underindentEscapeString for underindented line.'''
    trace = False and not g.app.unitTesting
    if self.isRst:
        return s # Never unindent rst code.
    tag = self.c.atFileCommands.underindentEscapeString
    result = []; tab_width = self.tab_width
    for line in g.splitlines(s):
        lws_s = g.get_leading_ws(line)
        lws = g.computeWidth(lws_s, tab_width)
        # Note: this rebinds s to the processed line;
        # the parameter s is no longer used below.
        s = g.removeLeadingWhitespace(line, undentVal, tab_width)
        # 2011/10/29: Add underindentEscapeString only for strict languages.
        if self.strict and s.strip() and lws < undentVal:
            if trace: g.trace('undentVal: %s, lws: %s, %s' % (
                undentVal, lws, repr(line)))
            # Bug fix 2012/06/05: end the underindent count with a period,
            # to protect against lines that start with a digit!
            result.append("%s%s.%s" % (tag, undentVal - lws, s.lstrip()))
        else:
            if trace: g.trace(repr(s))
            result.append(s)
    return ''.join(result)
#@+node:ekr.20170123041745.27: *7* BaseScanner.underindentedComment & underindentedLine
def underindentedComment(self, line):
    '''Optionally warn about an underindented comment line.'''
    if not self.atAutoWarnsAboutLeadingWhitespace:
        return
    self.warning(
        'underindented python comments.\nExtra leading whitespace will be added\n' + line)

def underindentedLine(self, line):
    '''Optionally report an underindented code line as an error.'''
    if not self.warnAboutUnderindentedLines:
        return
    self.error(
        'underindented line.\n' +
        'Extra leading whitespace will be added\n' + line)
#@+node:ekr.20170123041745.28: *6* BaseScanner.error, oops, report and warning
def error(self, s):
    '''Count and report an import error.
    During unit tests the message goes into g.app.unitTestDict instead.'''
    self.errors += 1
    self.importCommands.errors += 1
    if not g.unitTesting:
        g.error('Error:', s)
        return
    if self.errors == 1:
        g.app.unitTestDict['actualErrorMessage'] = s
    g.app.unitTestDict['actualErrors'] = self.errors
    if 0: # For debugging unit tests.
        g.trace(g.callers())
        g.error('', s)

def oops(self):
    '''Report a method that a subclass should have overridden.'''
    g.pr('BaseScanner oops: %s must be overridden in subclass' % g.callers())

def report(self, message):
    '''Treat message as an error for strict languages, else a warning.'''
    if self.strict:
        self.error(message)
    else:
        self.warning(message)

def warning(self, s):
    '''Issue a warning, suppressed during unit tests.'''
    if not g.unitTesting:
        g.warning('Warning:', s)
#@+node:ekr.20170123041745.29: *6* BaseScanner.headlineForNode
def headlineForNode(self, fn, p):
    '''Return the expected imported headline for p.b.

    Scans p.b for the first class or function signature, returning
    'class <name>' or the signature id; falls back to p.h.
    fn is used only by the disabled decls branch.'''
    trace = False and not g.unitTesting
    # From scan: parse the decls.s
    s = p.b
    if False: # and self.n_decls == 0 and self.hasDecls:
        i = self.skipDecls(s, 0, len(s), inClass=False)
        decls = s[: i]
        if decls:
            self.n_decls += 1
            val = '%s declarations' % fn
            if trace and val != p.h: g.trace(p.h, '==>', val)
            return val
    # From scanHelper: look for the first def or class.
    i = 0
    while i < len(s):
        progress = i
        if s[i] in (' ', '\t', '\n'):
            i += 1 # Prevent lookahead below, and speed up the scan.
        elif self.startsComment(s, i):
            i = self.skipComment(s, i)
        elif self.startsString(s, i):
            i = self.skipString(s, i)
        elif self.startsClass(s, i):
            val = 'class ' + self.sigId
            if trace and val != p.h: g.trace(p.h, '==>', val)
            return val
        elif self.startsFunction(s, i):
            val = self.sigId
            if trace and val != p.h: g.trace(p.h, '==>', val)
            return val
        elif self.startsId(s, i):
            i = self.skipId(s, i)
        elif g.match(s, i, self.outerBlockDelim1): # and kind == 'outer'
            # Do this after testing for classes.
            i = self.skipBlock(s, i, delim1=self.outerBlockDelim1, delim2=self.outerBlockDelim2)
        else: i += 1
        if progress >= i:
            # NOTE(review): recovery path — presumably skipBlock advances i;
            # otherwise the assert below fires.  Confirm.
            i = self.skipBlock(s, i, delim1=self.outerBlockDelim1, delim2=self.outerBlockDelim2)
        assert progress < i, 'i: %d, ch: %s' % (i, repr(s[i]))
    return p.h
#@+node:ekr.20170123041745.30: *6* BaseScanner.Parsing
@ Scan and skipDecls would typically not be overridden.
#@+node:ekr.20170123041745.31: *7* BaseScanner.adjustDefStart
def adjustDefStart(self, unused_s, i):
    '''Hook: importers (e.g. Python) may move the start of a class or
    function backwards to include decorators.  The base class does not.'''
    return i
#@+node:ekr.20170123041745.32: *7* BaseScanner.extendSignature
def extendSignature(self, unused_s, i):
    '''Hook: extend the signature line if appropriate.
    The text *must* end with a newline.

    For example, the Python scanner appends docstrings if they exist.
    The base class leaves i unchanged.'''
    return i
#@+node:ekr.20170123041745.33: *7* BaseScanner.getIndent
def getIndent(self, s, i):
    '''Return the indentation width of the line containing s[i].'''
    line_start, junk = g.getLine(s, i)
    junk2, indent = g.skip_leading_ws_with_indent(s, line_start, self.tab_width)
    return indent
#@+node:ekr.20170123041745.34: *7* BaseScanner.prepass & helper
def prepass(self, s, p):
    '''
    A prepass for the at-file-to-at-auto command.
    Return (ok,aList)
    ok: False if p.b should be split two or more sibling nodes.
    aList: a list of tuples (i,j,headline,p) indicating split nodes.
    '''
    trace = False and not g.unitTesting
    # From scanHelper...
    delim1, delim2 = self.outerBlockDelim1, self.outerBlockDelim2
    p = p.copy()
    # classSeen and refSeen veto any split; n counts the defs found;
    # parts holds (start, end, sigId, p) tuples, one per definition.
    classSeen, refSeen = False, False
    i, n, parts, start = 0, 0, [], 0
    while i < len(s):
        progress = i
        if s[i] in (' ', '\t', '\n'):
            i += 1 # Prevent lookahead below, and speed up the scan.
        elif self.startsComment(s, i):
            i = self.skipComment(s, i)
        elif self.startsString(s, i):
            i = self.skipString(s, i)
        elif s[i: i + 2] == '<<':
            # Look for a section reference, <<...>>, completed on this line.
            j = g.skip_line(s, i + 2)
            k = s.find('>>', i + 2)
            if -1 < k < j:
                g.trace(s[i: k + 2])
                refSeen = True
                i = k + 2
            else:
                i += 2
        elif self.startsClass(s, i):
            # g.trace('class',i,s[i:i+20])
            # Extend the previous definition.
            if n > 0 and start < i:
                i1, i2, id2, p2 = parts.pop()
                parts.append((i1, i, id2, p2),)
            classSeen = True
            # startsClass sets self.codeEnd and self.sigId as side effects.
            start, i = i, self.codeEnd
            parts.append((start, i, self.sigId, p),)
            n += 1
        elif self.startsFunction(s, i):
            # g.trace('func',i,s[i:i+20])
            # Extend the previous definition.
            if n > 0 and start < i:
                i1, i2, id2, p2 = parts.pop()
                parts.append((i1, i, id2, p2),)
            # startsFunction sets self.codeEnd and self.sigId as side effects.
            start, i = i, self.codeEnd
            parts.append((start, i, self.sigId, p),)
            n += 1
        elif self.startsId(s, i):
            i = self.skipId(s, i)
        elif g.match(s, i, delim1):
            # Do this after testing for classes.
            i = self.skipBlock(s, i, delim1, delim2)
        else:
            i += 1
        if progress >= i:
            # Last-ditch attempt to make progress.
            i = self.skipBlock(s, i, delim1, delim2)
        # NOTE(review): the assert message indexes s[i]; it would raise
        # IndexError if i == len(s) when the assert fires.
        assert progress < i, 'i: %d, ch: %s' % (i, repr(s[i]))
    # Extend the last definition to the end of the text.
    if n > 0 and start < i:
        i1, i2, id2, p2 = parts.pop()
        parts.append((i1, len(s), id2, p2),)
    if n <= 1 and not refSeen:
        return True, [] # Only one definition.
    elif p.hasChildren() or classSeen or refSeen:
        # Can't split safely.
        if trace: g.trace('can not split\n', ''.join([
            '\n----- %s\n%s\n' % (z[2], s[z[0]: z[1]]) for z in parts]))
        return False, []
    else:
        # Multiple defs, no children. Will split the node into children.
        return False, parts
#@+node:ekr.20170123041745.35: *7* BaseScanner.scan & scanHelper
def scan(self, s, parent, parse_body=False):
    '''A language independent scanner: it uses language-specific helpers.

    Create a child of self.root for:
    - Leading outer-level declarations.
    - Outer-level classes.
    - Outer-level functions.
    '''
    # Init the parser status ivars.
    self.methodsSeen = False
    # Create the initial body text in the root.
    if parse_body:
        pass
    else:
        self.putRootText(parent)
    # Parse the decls.
    if self.hasDecls:
        i = self.skipDecls(s, 0, len(s), inClass=False)
        decls = s[: i]
    else:
        i, decls = 0, ''
    # Create the decls node.
    if decls: self.createDeclsNode(parent, decls)
    # Scan the rest of the file.
    start, junk, junk = self.scanHelper(s, i, end=len(s), parent=parent, kind='outer')
    # Finish adding to the parent's body text.
    self.addRef(parent)
    if start < len(s):
        # Append any trailing text that scanHelper did not consume.
        self.appendStringToBody(parent, s[start:])
    # Do any language-specific post-processing.
    self.endGen(s)
#@+node:ekr.20170123041745.36: *8* BaseScanner.scanHelper
def scanHelper(self, s, i, end, parent, kind):
    '''Common scanning code used by both scan and putClassHelper.

    Return (start, putRef, bodyIndent):
    start: index of the first character not yet written to a node.
    putRef: True if at least one class or function node was generated.
    bodyIndent: indent of the first class/function found, or None.
    '''
    # g.trace(g.callers())
    # g.trace('i',i,g.get_line(s,i))
    assert kind in ('class', 'outer')
    start = i; putRef = False; bodyIndent = None
    # Major change: 2011/11/11: prevent scanners from going beyond end.
    if self.hasNestedClasses and end < len(s):
        s = s[: end] # Potentially expensive, but unavoidable.
    # if g.unitTesting: g.pdb()
    while i < end:
        progress = i
        if s[i] in (' ', '\t', '\n'):
            i += 1 # Prevent lookahead below, and speed up the scan.
        elif self.startsComment(s, i):
            i = self.skipComment(s, i)
        elif self.startsString(s, i):
            i = self.skipString(s, i)
        elif self.hasRegex and self.startsRegex(s, i):
            i = self.skipRegex(s, i)
        elif self.startsClass(s, i): # Sets sigStart,sigEnd & codeEnd ivars.
            putRef = True
            if bodyIndent is None: bodyIndent = self.getIndent(s, i)
            end2 = self.codeEnd # putClass may change codeEnd ivar.
            self.putClass(s, i, self.sigEnd, self.codeEnd, start, parent)
            i = start = end2
        elif self.startsFunction(s, i): # Sets sigStart,sigEnd & codeEnd ivars.
            putRef = True
            if bodyIndent is None: bodyIndent = self.getIndent(s, i)
            self.putFunction(s, self.sigStart, self.codeEnd, start, parent)
            i = start = self.codeEnd
        elif self.startsId(s, i):
            i = self.skipId(s, i)
        elif kind == 'outer' and g.match(s, i, self.outerBlockDelim1): # Do this after testing for classes.
            # i1 = i # for debugging
            i = self.skipBlock(s, i, delim1=self.outerBlockDelim1, delim2=self.outerBlockDelim2)
            # Bug fix: 2007/11/8: do *not* set start: we are just skipping the block.
        else: i += 1
        if progress >= i:
            # g.pdb()
            # Last-ditch attempt to make progress.
            i = self.skipBlock(s, i, delim1=self.outerBlockDelim1, delim2=self.outerBlockDelim2)
        # NOTE(review): the assert message indexes s[i]; it would raise
        # IndexError if i == len(s) when the assert fires.
        assert progress < i, 'i: %d, ch: %s' % (i, repr(s[i]))
    return start, putRef, bodyIndent
#@+node:ekr.20170123041745.37: *7* BaseScanner.Parser skip methods
#@+node:ekr.20170123041745.38: *8* BaseScanner.isSpace
def isSpace(self, s, i):
    '''Return True if s[i] exists and is whitespace other than a newline.'''
    if i >= len(s):
        return False
    ch = s[i]
    return ch != '\n' and ch.isspace()
#@+node:ekr.20170123041745.39: *8* BaseScanner.skipArgs
def skipArgs(self, s, i, kind):
    '''Skip the argument or base-class list.  Return (i, ok).

    kind is 'class' or 'function'.  A missing open paren is acceptable
    only for classes.'''
    start = i
    i = g.skip_ws_and_nl(s, i)
    if not g.match(s, i, '('):
        # No arg list at all: ok only for a class.
        return start, kind == 'class'
    # skipParens skips past the closing ')'.
    i = self.skipParens(s, i)
    if i >= len(s):
        # Unterminated arg list.
        return start, False
    return i, True
#@+node:ekr.20170123041745.40: *8* BaseScanner.skipBlock
def skipBlock(self, s, i, delim1=None, delim2=None):
    '''Skip from the opening delim to *past* the matching closing delim.

    If no matching is found i is set to len(s).
    Also warns (once per line) about non-blank lines indented less than
    the line that opened the block (self.startSigIndent).'''
    trace = False and not g.unitTesting
    verbose = False
    if delim1 is None: delim1 = self.blockDelim1
    if delim2 is None: delim2 = self.blockDelim2
    # Use whole-word matching for multi-character delims (e.g. begin/end).
    match1 = g.match if len(delim1) == 1 else g.match_word
    match2 = g.match if len(delim2) == 1 else g.match_word
    assert match1(s, i, delim1)
    level, start, startIndent = 0, i, self.startSigIndent
    if trace and verbose:
        g.trace('***', 'startIndent', startIndent)
    while i < len(s):
        progress = i
        if g.is_nl(s, i):
            # Check the indentation of the next (non-continuation) line.
            backslashNewline = i > 0 and g.match(s, i - 1, '\\\n')
            i = g.skip_nl(s, i)
            if not backslashNewline and not g.is_nl(s, i):
                j, indent = g.skip_leading_ws_with_indent(s, i, self.tab_width)
                line = g.get_line(s, j)
                if trace and verbose: g.trace('indent', indent, line)
                if indent < startIndent and line.strip():
                    # A non-empty underindented line.
                    # Issue an error unless it contains just the closing bracket.
                    if level == 1 and match2(s, j, delim2):
                        pass
                    else:
                        if j not in self.errorLines: # No error yet given.
                            self.errorLines.append(j)
                            self.underindentedLine(line)
        elif s[i] in (' ', '\t',):
            i += 1 # speed up the scan.
        elif self.startsComment(s, i):
            i = self.skipComment(s, i)
        elif self.startsString(s, i):
            i = self.skipString(s, i)
        elif match1(s, i, delim1):
            level += 1; i += len(delim1)
        elif match2(s, i, delim2):
            level -= 1; i += len(delim2)
            # Skip junk following Pascal 'end'
            for z in self.blockDelim2Cruft:
                i2 = self.skipWs(s, i)
                if g.match(s, i2, z):
                    i = i2 + len(z)
                    break
            if level <= 0:
                # 2010/09/20
                # Skip a single-line comment if it exists.
                j = self.skipWs(s, i)
                if (g.match(s, j, self.lineCommentDelim) or
                    g.match(s, j, self.lineCommentDelim2)
                ):
                    i = g.skip_to_end_of_line(s, i)
                if trace: g.trace('returns:\n\n%s\n\n' % s[start: i])
                return i
        else: i += 1
        assert progress < i
    # No matching closing delimiter was found.
    self.error('no block: %s' % self.root.h)
    if 1:
        i, j = g.getLine(s, start)
        g.trace(i, s[i: j])
    else:
        if trace: g.trace('** no block')
    return start + 1 # 2012/04/04: Ensure progress in caller.
#@+node:ekr.20170123041745.41: *8* BaseScanner.skipCodeBlock
def skipCodeBlock(self, s, i, kind):
    '''Skip the code block in a function or class definition.

    Return (i, ok): ok is False when a "fail token" immediately follows
    the block, meaning this was not a real definition after all.'''
    trace = False
    start = i
    i = self.skipBlock(s, i, delim1=None, delim2=None)
    if self.sigFailTokens:
        i = self.skipWs(s, i)
        for token in self.sigFailTokens:
            if g.match(s, i, token):
                if trace: g.trace('failtoken', token)
                return start, False
    if i > start:
        # Ensure the block ends with a newline.
        i = self.skipNewline(s, i, kind)
    if trace:
        g.trace('returns:\n\n%s\n\n' % s[start: i])
    return i, True
#@+node:ekr.20170123041745.42: *8* BaseScanner.skipComment & helper
def skipComment(self, s, i):
    '''Skip a comment; return the index of the first character after it.'''
    is_line_comment = (
        g.match(s, i, self.lineCommentDelim) or
        g.match(s, i, self.lineCommentDelim2))
    if is_line_comment:
        # A single-line comment: consume the rest of the line.
        return g.skip_to_end_of_line(s, i)
    return self.skipBlockComment(s, i)
#@+node:ekr.20170123041745.43: *9* BaseScanner.skipBlockComment
def skipBlockComment(self, s, i):
    '''Skip past a block comment; return the index after the closing delim.

    Returns len(s) (with an error) for an unterminated comment.'''
    start = i
    # Skip the opening delimiter and select the matching closer.
    if g.match(s, i, self.blockCommentDelim1):
        i += len(self.blockCommentDelim1)
        closer = self.blockCommentDelim2
    elif g.match(s, i, self.blockCommentDelim1_2):
        i += len(self.blockCommentDelim1_2)
        closer = self.blockCommentDelim2_2
    else:
        assert False # Caller guarantees a block comment starts at i.
    # Find the closing delimiter.
    k = s.find(closer, i)
    if k == -1:
        self.error('Run on block comment: ' + s[start: i])
        return len(s)
    return k + len(closer)
#@+node:ekr.20170123041745.44: *8* BaseScanner.skipDecls
def skipDecls(self, s, i, end, inClass):
    '''
    Skip everything until the start of the next class or function.
    The decls *must* end in a newline.

    Return the index just past the decls, or the original i when the
    decls are empty or when a class contains nothing but decls.
    '''
    trace = False or self.trace
    # prefix: the start of a trailing comment that should belong to
    # the *next* node rather than to the decls.
    start = i; prefix = None
    classOrFunc = False
    if trace: g.trace(g.callers())
    # Major change: 2011/11/11: prevent scanners from going beyond end.
    if self.hasNestedClasses and end < len(s):
        s = s[: end] # Potentially expensive, but unavoidable.
    while i < end:
        progress = i
        if s[i] in (' ', '\t', '\n'):
            i += 1 # Prevent lookahead below, and speed up the scan.
        elif self.startsComment(s, i):
            # Add the comment to the decl if it *doesn't* start the line.
            i2, junk = g.getLine(s, i)
            i2 = self.skipWs(s, i2)
            if i2 == i and prefix is None:
                prefix = i2 # Bug fix: must include leading whitespace in the comment.
            i = self.skipComment(s, i)
        elif self.startsString(s, i):
            i = self.skipString(s, i)
            prefix = None
        elif self.startsClass(s, i):
            # Important: do not include leading ws in the decls.
            classOrFunc = True
            i = g.find_line_start(s, i)
            i = self.adjustDefStart(s, i)
            break
        elif self.startsFunction(s, i):
            # Important: do not include leading ws in the decls.
            classOrFunc = True
            i = g.find_line_start(s, i)
            i = self.adjustDefStart(s, i)
            break
        elif self.startsId(s, i):
            i = self.skipId(s, i)
            prefix = None
        # Don't skip outer blocks: they may contain classes.
        elif g.match(s, i, self.outerBlockDelim1):
            if self.outerBlockEndsDecls:
                break
            else:
                i = self.skipBlock(s, i,
                    delim1=self.outerBlockDelim1,
                    delim2=self.outerBlockDelim2)
        else:
            i += 1; prefix = None
        assert(progress < i)
    if prefix is not None:
        # Back up so the trailing comment goes with the next node.
        i = g.find_line_start(s, prefix) # i = prefix
    decls = s[start: i]
    if inClass and not classOrFunc:
        # Don't return decls if a class contains nothing but decls.
        if trace and decls.strip(): g.trace('**class is all decls...\n', decls)
        return start
    elif decls.strip():
        if trace or self.trace: g.trace('\n' + decls)
        return i
    else: # Ignore empty decls.
        return start
#@+node:ekr.20170123041745.45: *8* BaseScanner.skipId
def skipId(self, s, i):
    '''Skip an identifier, allowing any language-specific extra id characters.'''
    return g.skip_id(s, i, chars=self.extraIdChars)
#@+node:ekr.20170123041745.46: *8* BaseScanner.skipNewline
def skipNewline(self, s, i, kind):
    '''
    Called by skipCodeBlock to terminate a function definition.
    Skip whitespace and comments up to a newline, then skip the newline.
    Issue an error if no newline is found.
    '''
    # Consume any whitespace/comment runs before the newline.
    while i < len(s):
        i = self.skipWs(s, i)
        if not self.startsComment(s, i):
            break
        i = self.skipComment(s, i)
    if i >= len(s):
        return len(s)
    if g.match(s, i, '\n'):
        return i + 1
    self.error(
        '%s %s does not end in a newline; one will be added\n%s' % (
            kind, self.sigId, g.get_line(s, i)))
    return i
#@+node:ekr.20170123041745.47: *8* BaseScanner.skipParens
def skipParens(self, s, i):
    '''Skip a parenthesized list, which might contain strings or comments.'''
    return self.skipBlock(s, i, delim1='(', delim2=')')
#@+node:ekr.20170123041745.48: *8* BaseScanner.skipRegex
def skipRegex(self, s, i):
    '''Skip the regular expression starting at s[i].

    Return the index just past the closing delimiter, or len(s) (with a
    trace) when the regex is unterminated.'''
    start = i
    delim = s[i]
    i += 1
    while i < len(s):
        if s[i] == delim:
            # The delimiter ends the regex only if it is not escaped:
            # count the run of backslashes immediately preceding it.
            j = i - 1
            backslashes = 0
            while j >= 0 and s[j] == '\\':
                backslashes += 1
                j -= 1
            if backslashes % 2 == 0:
                return i + 1
        i += 1
    g.trace('unterminated regex starting at', start)
    return i

#@+node:ekr.20170123041745.49: *8* BaseScanner.skipString
def skipString(self, s, i):
    '''Skip the string starting at s[i].
    Returns len(s) on unterminated string.'''
    return g.skip_string(s, i, verbose=False)
#@+node:ekr.20170123041745.50: *8* BaseScanner.skipWs
def skipWs(self, s, i):
    '''Skip blanks and tabs (not newlines) starting at s[i].'''
    return g.skip_ws(s, i)
#@+node:ekr.20170123041745.51: *7* BaseScanner.Parser starts methods
#@+node:ekr.20170123041745.52: *8* BaseScanner.startsClass/Function & helpers
# We don't expect to override this code, but subclasses may override the helpers.

def startsClass(self, s, i):
    '''Return True if s[i:] starts a class definition.
    Sets sigStart, sigEnd, sigId and codeEnd ivars via startsHelper.'''
    return self.hasClasses and self.startsHelper(s, i, kind='class', tags=self.classTags)

def startsFunction(self, s, i):
    '''Return True if s[i:] starts a function.
    Sets sigStart, sigEnd, sigId and codeEnd ivars via startsHelper.'''
    return self.hasFunctions and self.startsHelper(s, i, kind='function', tags=self.functionTags)
#@+node:ekr.20170123041745.53: *9* BaseScanner.getSigId
def getSigId(self, ids):
    '''Return the signature's id.

    By default this is the last id in the ids list; an empty or None
    list is returned unchanged (falsy).'''
    if ids:
        return ids[-1]
    return ids
#@+node:ekr.20170123041745.54: *9* BaseScanner.skipSigStart
def skipSigStart(self, s, i, kind, tags):
    '''Skip over the start of a function/class signature.

    tags is in (self.classTags,self.functionTags).

    Return (i, ids, classId) where ids is a list of all ids found, in
    order, and classId is the last id that appeared in tags (or None).'''
    trace = False and self.trace # or kind =='function'
    ids = []; classId = None
    if trace: g.trace('*entry', kind, i, s[i: i + 20])
    start = i
    while i < len(s):
        j = g.skip_ws_and_nl(s, i)
        # A fail token anywhere in the signature aborts the scan.
        for z in self.sigFailTokens:
            if g.match(s, j, z):
                if trace: g.trace('failtoken', z, 'ids', ids)
                return start, [], None
        for z in self.sigHeadExtraTokens:
            if g.match(s, j, z):
                i += len(z); break
        else:
            # for/else: no extra token matched, so scan an id.
            i = self.skipId(s, j)
            theId = s[j: i]
            if theId and theId in tags: classId = theId
            if theId: ids.append(theId)
            else: break # No more ids: the signature head is done.
    if trace: g.trace('*exit ', kind, i, i < len(s) and s[i], ids, classId)
    return i, ids, classId
#@+node:ekr.20170123041745.55: *9* BaseScanner.skipSigTail
def skipSigTail(self, s, i, kind):
    '''Skip from the end of the arg list to the start of the block.

    Return (i, ok): ok is False on a fail token or a missing block
    delimiter.'''
    trace = False and self.trace
    start = i
    i = self.skipWs(s, i)
    # A fail token here means this signature is not a real definition.
    for token in self.sigFailTokens:
        if g.match(s, i, token):
            if trace: g.trace('failToken', token, 'line', g.skip_line(s, i))
            return i, False
    while i < len(s):
        if self.startsComment(s, i):
            i = self.skipComment(s, i)
        elif g.match(s, i, self.blockDelim1):
            if trace: g.trace(repr(s[start: i]))
            return i, True
        else:
            i += 1
    if trace: g.trace('no block delim')
    return i, False
#@+node:ekr.20170123041745.56: *9* BaseScanner.startsHelper
def startsHelper(self, s, i, kind, tags, tag=None):
    '''
    tags is a list of id's.  tag is a debugging tag.
    return True if s[i:] starts a class or function.
    Sets sigStart, sigEnd, sigId and codeEnd ivars.
    '''
    trace = False or self.trace
    verbose = True # kind=='function'
    self.codeEnd = self.sigEnd = self.sigId = None
    self.sigStart = i
    # Underindented lines can happen in any language, not just Python.
    # The skipBlock method of the base class checks for such lines.
    self.startSigIndent = self.getLeadingIndent(s, i)
    # Get the tag that starts the class or function.
    j = g.skip_ws_and_nl(s, i)
    i = self.skipId(s, j)
    self.sigId = theId = s[j: i] # Set sigId ivar 'early' for error messages.
    if not theId: return False
    if tags:
        if self.caseInsensitive:
            theId = theId.lower()
        if theId not in tags:
            if trace and verbose:
                # g.trace('**** %s theId: %s not in tags: %s' % (kind,theId,tags))
                g.trace('%8s: ignoring %s' % (kind, theId))
            return False
    if trace and verbose: g.trace('kind', kind, 'id', theId)
    # Get the class/function id.
    if kind == 'class' and self.sigId in self.anonymousClasses:
        # A hack for Delphi Pascal: interfaces have no id's.
        # g.trace('anonymous',self.sigId)
        classId = theId
        sigId = ''
    else:
        i, ids, classId = self.skipSigStart(s, j, kind, tags) # Rescan the first id.
        sigId = self.getSigId(ids)
        if not sigId:
            if trace and verbose: g.trace('**no sigId', g.get_line(s, i))
            return False
    if self.output_indent < self.startSigIndent:
        if trace: g.trace('**over-indent', sigId)
            #,'output_indent',self.output_indent,'startSigIndent',self.startSigIndent)
        return False
    # Skip the argument list.
    i, ok = self.skipArgs(s, i, kind)
    if not ok:
        if trace and verbose: g.trace('no args', g.get_line(s, i))
        return False
    i = g.skip_ws_and_nl(s, i)
    # Skip the tail of the signature
    i, ok = self.skipSigTail(s, i, kind)
    if not ok:
        if trace and verbose: g.trace('no tail', g.get_line(s, i))
        return False
    sigEnd = i
    # A trick: make sure the signature ends in a newline,
    # even if it overlaps the start of the block.
    if not g.match(s, sigEnd, '\n') and not g.match(s, sigEnd - 1, '\n'):
        if trace and verbose: g.trace('extending sigEnd')
        sigEnd = g.skip_line(s, sigEnd)
    if self.blockDelim1:
        i = g.skip_ws_and_nl(s, i)
        if kind == 'class' and self.sigId in self.anonymousClasses:
            pass # Allow weird Pascal units.
        elif not g.match(s, i, self.blockDelim1):
            if trace and verbose: g.trace('no block', g.get_line(s, i))
            return False
    i, ok = self.skipCodeBlock(s, i, kind)
    if not ok: return False
        # skipCodeBlock skips the trailing delim.
    # This assert ensures that all class/function/method definitions end with a newline.
    # It would be False for language like html/xml, but they override this method.
    if self.language != 'javascript':
        assert i > 0 and s[i - 1] == '\n' or i == len(s), (i, len(s))
    # Success: set the ivars.
    self.sigStart = self.adjustDefStart(s, self.sigStart)
    self.codeEnd = i
    self.sigEnd = sigEnd
    self.sigId = sigId
    self.classId = classId
    # Note: backing up here is safe because
    # we won't back up past scan's 'start' point.
    # Thus, characters will never be output twice.
    k = self.sigStart
    if not g.match(s, k, '\n'):
        self.sigStart = g.find_line_start(s, k)
    # Issue this warning only if we have a real class or function.
    if 0: # wrong.
        if s[self.sigStart: k].strip():
            self.error('%s definition does not start a line\n%s' % (
                kind, g.get_line(s, k)))
    if trace:
        if verbose:
            g.trace(kind, 'returns:\n%s' % s[self.sigStart: i])
        else:
            first_line = g.splitLines(s[self.sigStart: i])[0]
            g.trace(kind, first_line.rstrip())
    return True
#@+node:ekr.20170123041745.57: *8* BaseScanner.startsComment
def startsComment(self, s, i):
    '''Return a true value if s[i:] starts a line or block comment.'''
    return (
        g.match(s, i, self.lineCommentDelim) or
        g.match(s, i, self.lineCommentDelim2) or
        g.match(s, i, self.blockCommentDelim1) or
        g.match(s, i, self.blockCommentDelim1_2)
    )
#@+node:ekr.20170123041745.58: *8* BaseScanner.startsId
def startsId(self, s, i):
    '''Return True if s[i] starts a C-like identifier.'''
    return g.is_c_id(s[i: i + 1])
#@+node:ekr.20170123041745.59: *8* BaseScanner.startsRegex
def startsRegex(self, s, i):
    '''Return True if s[i] starts a regular expression (a '/' character).'''
    return s[i] == '/'
#@+node:ekr.20170123041745.60: *8* BaseScanner.startsString
def startsString(self, s, i):
    '''Return a true value if s[i] starts a single- or double-quoted string.'''
    return g.match(s, i, '"') or g.match(s, i, "'")
#@+node:ekr.20170123041745.61: *6* BaseScanner.run
def run(self, s, parent, parse_body=False, prepass=False):
    '''The common top-level code for all scanners.

    s: the file text to import.
    parent: the position to receive the generated nodes.
    prepass: if True, run only self.prepass and return its result.

    Returns prepass's (ok, aList) tuple when prepass is True; otherwise
    True if the generated outline is equivalent to the original file.'''
    self.isPrepass = prepass
    c = self.c
    self.root = root = parent.copy()
    self.file_s = s
    self.tab_width = c.getTabWidth(p=root)
    # Create the ws equivalent to one tab.
    self.tab_ws = ' ' * abs(self.tab_width) if self.tab_width < 0 else '\t'
    # Init the error/status info.
    self.errors = 0
    self.errorLines = []
    self.mismatchWarningGiven = False
    changed = c.isChanged()
    # Use @verbatim to escape section references (but not for @auto).
    if self.escapeSectionRefs and not self.atAuto:
        s = self.escapeFalseSectionReferences(s)
    # Check for intermixed blanks and tabs.
    if self.strict or self.atAutoWarnsAboutLeadingWhitespace:
        if not self.isRst:
            self.checkBlanksAndTabs(s)
    # Regularize leading whitespace (strict languages only).
    if self.strict: s = self.regularizeWhitespace(s)
    # Generate the nodes, including directives and section references.
    if self.isPrepass:
        return self.prepass(s, parent)
    else:
        self.scan(s, parent, parse_body=parse_body)
        # Check the generated nodes.
        # Return True if the result is equivalent to the original file.
        ok = self.errors == 0 and self.check(s, parent)
        g.app.unitTestDict['result'] = ok
        # Insert an @ignore directive if there were any serious problems.
        if not ok: self.insertIgnoreDirective(parent)
        # It's always useless for an import to dirty the outline.
        for p in root.self_and_subtree():
            p.clearDirty()
        c.setChanged(changed)
        return ok
#@+node:ekr.20170123041745.62: *7* BaseScanner.escapeFalseSectionReferences
def escapeFalseSectionReferences(self, s):
    '''
    Return s unchanged: keep the apparent section references.

    Probably a bad idea to do otherwise.  The perfect-import write code
    no longer attempts to expand references when the perfectImportFlag
    is set.  The original escaping logic is preserved below for reference.
    '''
    return s
    # result = []
    # for line in g.splitLines(s):
        # r1 = line.find('<<')
        # r2 = line.find('>>')
        # if r1>=0 and r2>=0 and r1<r2:
            # result.append("@verbatim\n")
            # result.append(line)
        # else:
            # result.append(line)
    # return ''.join(result)
#@+node:ekr.20170123041745.63: *7* BaseScanner.checkBlanksAndTabs
def checkBlanksAndTabs(self, s):
    '''Check for intermixed blanks & tabs in leading whitespace.

    Report a problem and return False when both occur anywhere in s.'''
    n_blanks = n_tabs = 0
    for line in g.splitLines(s):
        lws = line[0: g.skip_ws(line, 0)]
        n_blanks += lws.count(' ')
        n_tabs += lws.count('\t')
    ok = n_blanks == 0 or n_tabs == 0
    if not ok:
        self.report('intermixed blanks and tabs')
    return ok
#@+node:ekr.20170123041745.64: *7* BaseScanner.regularizeWhitespace
def regularizeWhitespace(self, s):
    '''Regularize leading whitespace in s:
    Convert tabs to blanks or vice versa depending on the @tabwidth in effect.
    This is only called for strict languages.

    NOTE(review): when self.tab_width == 0 this returns '' (as the
    original did); presumably tab_width is never 0 here — confirm.'''
    tab_width = self.tab_width
    changed, result = False, []
    if tab_width < 0: # Convert tabs to blanks.
        for line in g.splitLines(s):
            i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
            # Use a negative width to force blanks.
            new_line = g.computeLeadingWhitespace(width, -abs(tab_width)) + line[i:]
            if new_line != line: changed = True
            result.append(new_line)
    elif tab_width > 0: # Convert blanks to tabs.
        for line in g.splitLines(s):
            # Use a positive width to force tabs.
            new_line = g.optimizeLeadingWhitespace(line, abs(tab_width))
            if new_line != line: changed = True
            result.append(new_line)
    if changed:
        action = 'tabs converted to blanks' if tab_width < 0 else 'blanks converted to tabs'
        self.report('inconsistent leading whitespace. %s' % action)
    return ''.join(result)
#@+node:ekr.20170123041745.65: *6* BaseScanner.Tokenizing
#@+node:ekr.20170123041745.66: *7* BaseScanner.skip...Token
def skipCommentToken(self, s, i):
    '''Skip a comment; return (end, token_text).'''
    j = self.skipComment(s, i)
    return j, s[i: j]

def skipIdToken(self, s, i):
    '''Skip an identifier; return (end, token_text).'''
    j = self.skipId(s, i)
    return j, s[i: j]

def skipNewlineToken(self, s, i):
    '''Skip a single newline; return (end, token_text).'''
    return i + 1, '\n'

def skipOtherToken(self, s, i):
    '''Skip one "other" character; return (end, token_text).'''
    return i + 1, s[i]

def skipStringToken(self, s, i):
    '''Skip a string; return (end, token_text).'''
    j = self.skipString(s, i)
    return j, s[i: j]

def skipWsToken(self, s, i):
    '''Skip a run of non-newline whitespace; return (end, token_text).'''
    start = i
    while i < len(s) and s[i] != '\n' and s[i].isspace():
        i += 1
    return i, s[start: i]
#@+node:ekr.20170123041745.67: *7* BaseScanner.tokenize
def tokenize(self, s):
    '''Tokenize string s and return a list of tokens (kind,value,line_number)

    where kind is in ('comment,'id','nl','other','string','ws').

    This is used only to verify the imported text.
    '''
    result, i, line_number = [], 0, 0
    while i < len(s):
        progress = j = i
        ch = s[i]
        # Dispatch on the kind of token.  The order of tests matters:
        # comments and strings must be tested before ids.
        if ch == '\n':
            kind = 'nl'
            i, val = self.skipNewlineToken(s, i)
        elif ch in ' \t': # self.isSpace(s,i):
            kind = 'ws'
            i, val = self.skipWsToken(s, i)
        elif self.startsComment(s, i):
            kind = 'comment'
            i, val = self.skipCommentToken(s, i)
        elif self.startsString(s, i):
            kind = 'string'
            i, val = self.skipStringToken(s, i)
        elif self.startsId(s, i):
            kind = 'id'
            i, val = self.skipIdToken(s, i)
        else:
            kind = 'other'
            i, val = self.skipOtherToken(s, i)
        assert progress < i and j == progress
        if val:
            result.append((kind, val, line_number),)
        # Use the raw token, s[j:i] to count newlines, not the munged val.
        line_number += s[j: i].count('\n')
        # g.trace('%3s %7s %s' % (line_number,kind,repr(val[:20])))
    return result
#@+node:ekr.20170123041745.68: *6* BaseScanner.check & helpers (New)
def check(self, unused_s, parent):
    '''ImportController.check: verify the import.

    Compare self.file_s (the original file text) against self.trial_write(),
    optionally after cleaning whitespace, and print a mismatch report on
    failure.  Return True if the texts match.'''
    trace = False and not g.unitTesting
    trace_lines = True
    no_clean = True # True: strict lws check for *all* languages.
    fn = g.shortFileName(self.root.h)
    s1 = g.toUnicode(self.file_s, self.encoding)
    s2 = self.trial_write()
    clean = self.strip_lws # strip_all, clean_blank_lines
    if self.ws_error or (not no_clean and self.gen_clean):
        s1, s2 = clean(s1), clean(s2)
    # Forgive trailing whitespace problems in the last line:
    if self.ws_error:
        s1, s2 = s1.rstrip() + '\n', s2.rstrip() + '\n'
    ok = s1 == s2
    if not ok and self.name == 'javascript':
        # Javascript: forgive leading-whitespace differences, but warn.
        s1, s2 = clean(s1), clean(s2)
        ok = s1 == s2
        if ok and not g.unitTesting:
            g.es_print(
                'indentation error: leading whitespace changed in:',
                self.root.h)
    if not ok:
        # Consistency fix: use g.splitLines for both strings
        # (previously g.splitlines was used for s2).
        lines1, lines2 = g.splitLines(s1), g.splitLines(s2)
        n1, n2 = len(lines1), len(lines2)
        g.es_print('\n===== PERFECT IMPORT FAILED =====', fn)
        g.es_print('len(s1): %s len(s2): %s' % (n1, n2))
        for i in range(min(n1, n2)):
            line1, line2 = lines1[i], lines2[i]
            if line1 != line2:
                # Fixed the irregular (17-space) indentation of this body.
                g.es_print('first mismatched line: %s' % (i + 1))
                g.es_print(repr(line1))
                g.es_print(repr(line2))
                break
        else:
            g.es_print('all common lines match')
        if trace and trace_lines:
            g.es_print('===== s1: %s' % parent.h)
            for i, s in enumerate(g.splitLines(s1)):
                g.es_print('%3s %r' % (i + 1, s))
            g.trace('===== s2')
            for i, s in enumerate(g.splitLines(s2)):
                g.es_print('%3s %r' % (i + 1, s))
    if 0: # This is wrong headed.
        if not self.strict and not ok:
            # Suppress the error if lws is the cause.
            clean = self.strip_lws # strip_all, clean_blank_lines
            ok = clean(s1) == clean(s2)
    return ok
#@+node:ekr.20170123041745.69: *7* BaseScanner.clean_blank_lines
def clean_blank_lines(self, s):
    '''Remove all blanks and tabs from otherwise-blank lines of s.'''
    cleaned = []
    for line in g.splitLines(s):
        if line.strip():
            cleaned.append(line)
        else:
            # A blank line: keep only its line ending.
            cleaned.append(line.replace(' ', '').replace('\t', ''))
    return ''.join(cleaned)
#@+node:ekr.20170123041745.70: *7* BaseScanner.strip_*
def strip_all(self, s):
    '''Strip blank lines and leading whitespace from all lines of s.'''
    return self.strip_lws(self.strip_blank_lines(s))

def strip_blank_lines(self, s):
    '''Return s with all whitespace-only lines removed.'''
    kept = [line for line in g.splitLines(s) if line.strip()]
    return ''.join(kept)

def strip_lws(self, s):
    '''Return s with leading whitespace removed from every line.'''
    return ''.join(line.lstrip() for line in g.splitLines(s))
#@+node:ekr.20170123041745.71: *7* BaseScanner.trial_write
def trial_write(self):
    '''Return the trial write (a unicode string) for self.root.'''
    at, root = self.c.atFileCommands, self.root
    if self.gen_refs:
        # The *actual* @auto write code refuses to write section
        # references, so use the general write method instead.
        at.write(root,
            nosentinels=True,           # was False,
            perfectImportFlag=False,    # was True,
            scriptWrite=True,           # was False,
            thinFile=True,
            toString=True,
        )
    else:
        at.writeOneAtAutoNode(root,
            toString=True, force=True, trialWrite=True)
    return g.toUnicode(at.stringOutput, self.encoding)
#@+node:ekr.20170123041745.72: *5* class ImportController
class ImportController(object):
    '''The base class for all new (line-oriented) controller classes.'''
    # NOTE: `@others` below is a Leo directive, not Python syntax.  Leo
    # splices the IC.* methods (defined in child nodes) into the class body.

    @others
#@+node:ekr.20170123041745.73: *6* IC.__init__
@nobeautify

def __init__(self,
    importCommands,
    atAuto,
    gen_clean = True,
    gen_refs = False,
    language = None, # For @language directive.
    name = None, # The kind of importer.
    scanner = None,
    strict = False,
):
    '''
    Ctor for the ImportController class.
    (The original docstring said BaseScanner; this class is ImportController.)
    '''
    # Copies of args...
    self.importCommands = ic = importCommands
    self.atAuto = atAuto
    self.c = c = ic.c
    self.encoding = ic.encoding
    self.language = language or name
        # For the @language directive.
    self.name = name or language
    assert language or name
    self.state = scanner # A scanner instance.
    self.strict = strict
        # True: leading whitespace is significant.
    assert scanner, 'Caller must provide a LineScanner instance'

    # Set from ivars...
    self.has_decls = name not in ('xml', 'org-mode', 'vimoutliner')
    self.is_rst = name in ('rst',)
    self.tree_type = ic.treeType # '@root', '@file', etc.

    # Constants..
    self.gen_clean = gen_clean
    self.gen_refs = gen_refs
    self.tab_width = None # Must be set in run()

    # The ws equivalent to one tab.
    # self.tab_ws = ' ' * abs(self.tab_width) if self.tab_width < 0 else '\t'

    # Settings...
    self.at_auto_warns_about_leading_whitespace = c.config.getBool(
        'at_auto_warns_about_leading_whitespace')
    self.warn_about_underindented_lines = True
    # self.at_auto_separate_non_def_nodes = False

    # State vars.
    self.errors = 0
    ic.errors = 0 # Required.
    self.ws_error = False # True: whitespace was regularized; check() relaxes.
    self.root = None # Set in run().
#@+node:ekr.20170123041745.74: *6* IC.check & helpers
def check(self, unused_s, parent):
    '''
    ImportController.check: the perfect-import test.

    Return True if the trial write of self.root matches the original file
    contents (self.file_s), modulo the whitespace-forgiveness rules below.
    Prints diagnostics on failure.
    '''
    trace = False and not g.unitTesting
    trace_lines = True
    no_clean = True # True: strict lws check for *all* languages.
    fn = g.shortFileName(self.root.h)
    s1 = g.toUnicode(self.file_s, self.encoding)
    s2 = self.trial_write()
    clean = self.strip_lws # strip_all, clean_blank_lines
    if self.ws_error or (not no_clean and self.gen_clean):
        s1, s2 = clean(s1), clean(s2)
    # Forgive trailing whitespace problems in the last line:
    if self.ws_error:
        s1, s2 = s1.rstrip()+'\n', s2.rstrip()+'\n'
    ok = s1 == s2
    if not ok and self.name == 'javascript':
        # Javascript only: forgive leading-whitespace-only differences,
        # but report them.
        s1, s2 = clean(s1), clean(s2)
        ok = s1 == s2
        if ok and not g.unitTesting:
            g.es_print(
                'indentation error: leading whitespace changed in:',
                self.root.h)
    if not ok:
        # NOTE(review): g.splitlines (lower case) here vs. g.splitLines
        # above -- presumably both exist in leoGlobals; verify.
        lines1, lines2 = g.splitLines(s1), g.splitlines(s2)
        n1, n2 = len(lines1), len(lines2)
        g.es_print('\n===== PERFECT IMPORT FAILED =====', fn)
        g.es_print('len(s1): %s len(s2): %s' % (n1, n2))
        # Report only the first mismatched line.
        for i in range(min(n1, n2)):
            line1, line2 = lines1[i], lines2[i]
            if line1 != line2:
                 g.es_print('first mismatched line: %s' % (i+1))
                 g.es_print(repr(line1))
                 g.es_print(repr(line2))
                 break
        else:
            g.es_print('all common lines match')
        if trace and trace_lines:
            g.es_print('===== s1: %s' % parent.h)
            for i, s in enumerate(g.splitLines(s1)):
                g.es_print('%3s %r' % (i+1, s))
            g.trace('===== s2')
            for i, s in enumerate(g.splitLines(s2)):
                g.es_print('%3s %r' % (i+1, s))
    if 0: # This is wrong headed.
        if not self.strict and not ok:
            # Suppress the error if lws is the cause.
            clean = self.strip_lws # strip_all, clean_blank_lines
            ok = clean(s1) == clean(s2)
    return ok
#@+node:ekr.20170123041745.75: *7* IC.clean_blank_lines
def clean_blank_lines(self, s):
    '''Remove all blanks and tabs in all blank lines of s.'''
    def scrub(line):
        # Non-blank lines pass through; blank lines keep only the newline.
        return line if line.strip() else line.replace(' ', '').replace('\t', '')
    return ''.join(scrub(line) for line in g.splitLines(s))
#@+node:ekr.20170123041745.76: *7* IC.strip_*
def strip_all(self, s):
    '''Strip blank lines and leading whitespace from all lines of s.'''
    no_blanks = self.strip_blank_lines(s)
    return self.strip_lws(no_blanks)

def strip_blank_lines(self, s):
    '''Strip all blank lines from s.'''
    return ''.join(z for z in g.splitLines(s) if z.strip())

def strip_lws(self, s):
    '''Strip leading whitespace from all lines of s.'''
    stripped = [z.lstrip() for z in g.splitLines(s)]
    return ''.join(stripped)
#@+node:ekr.20170123041745.77: *7* IC.trial_write
def trial_write(self):
    '''Return the trial write for self.root, as a unicode string.'''
    at = self.c.atFileCommands
    if not self.gen_refs:
        at.writeOneAtAutoNode(
            self.root,
            toString=True,
            force=True,
            trialWrite=True,
        )
    else:
        # The real @auto write code refuses to write section references,
        # so fall back to the general write method.
        at.write(self.root,
            nosentinels=True,           # was False,
            perfectImportFlag=False,    # was True,
            scriptWrite=True,           # was False,
            thinFile=True,
            toString=True,
        )
    return g.toUnicode(at.stringOutput, self.encoding)
#@+node:ekr.20170123041745.78: *6* IC.Overrides
# These can be overridden in subclasses.
#@+node:ekr.20170123041745.79: *7* IC.adjust_parent
def adjust_parent(self, parent, headline):
    '''
    Return the effective parent for the node with the given headline.

    The base class is a no-op; the RstScanner class overrides this.
    '''
    return parent
#@+node:ekr.20170123041745.80: *7* IC.clean_headline
def clean_headline(self, s):
    '''
    Return the cleaned version of headline s.
    Will typically be overridden in subclasses.
    '''
    cleaned = s.strip()
    return cleaned
#@+node:ekr.20170123041745.81: *6* IC.run (entry point) & helpers
def run(self, s, parent, parse_body=False, prepass=False):
    '''
    The common top-level code for all scanners.

    Import the file contents s into the outline rooted at parent.
    Return True if the generated tree passes the perfect-import check.
    '''
    trace = False and not g.unitTesting
    if trace: g.trace('=' * 30, parent.h)
    c = self.c
    if prepass:
        g.trace('(ImportController) Can not happen, prepass is True')
        return True, [] # Don't split any nodes.
    self.root = root = parent.copy()
    self.file_s = s
    # Init the error/status info.
    self.errors = 0
    # Check for intermixed blanks and tabs.
    self.tab_width = c.getTabWidth(p=root)
    ws_ok = self.check_blanks_and_tabs(s) # Only issues warnings.
    # Regularize leading whitespace
    if not ws_ok:
        s = self.regularize_whitespace(s)
    # Generate the nodes, including directives and section references.
    changed = c.isChanged()
    self.v1_scan(s, parent)
    self.post_pass(parent)
    # Check the generated nodes.
    # Return True if the result is equivalent to the original file.
    ok = self.errors == 0 and self.check(s, parent)
    g.app.unitTestDict['result'] = ok
    # Insert an @ignore directive if there were any serious problems.
    if not ok:
        self.insert_ignore_directive(parent)
    # It's always useless for an import to dirty the outline.
    for p in root.self_and_subtree():
        p.clearDirty()
    c.setChanged(changed)
        # Restore the outline's original changed state.
    if trace: g.trace('-' * 30, parent.h)
    return ok
#@+node:ekr.20170123041745.82: *7* IC.check_blanks_and_tabs
def check_blanks_and_tabs(self, lines):
    '''
    Check leading whitespace in lines against the effective @tabwidth.

    Warn (or report an error, for strict importers, via self.report) when:
    - tabs appear although @tabwidth is negative (blanks expected),
    - blanks appear although @tabwidth is positive (tabs expected),
    - blanks and tabs are intermixed.

    Return True if the leading whitespace is consistent.
    '''
    # Do a quick check for mixed leading tabs/blanks.
    trace = False and not g.unitTesting
    fn = g.shortFileName(self.root.h)
    w = self.tab_width
    blanks = tabs = 0
    for s in g.splitLines(lines):
        lws = self.get_str_lws(s)
        blanks += lws.count(' ')
        tabs += lws.count('\t')
    # Bug fix: the original left `ok` and `message` unbound when w == 0,
    # raising NameError below.  Default to the intermixing check only.
    ok, message = True, ''
    # Make sure whitespace matches @tabwidth directive.
    if w < 0:
        ok = tabs == 0
        message = 'tabs found with @tabwidth %s in %s' % (w, fn)
    elif w > 0:
        ok = blanks == 0
        message = 'blanks found with @tabwidth %s in %s' % (w, fn)
    if ok:
        ok = blanks == 0 or tabs == 0
        message = 'intermixed blanks and tabs in: %s' % (fn)
    if ok:
        if trace: g.trace('=====', len(lines), blanks, tabs)
    else:
        if g.unitTesting:
            self.report(message)
        else:
            g.es_print(message)
    return ok
#@+node:ekr.20170123041745.83: *7* IC.insert_ignore_directive
def insert_ignore_directive(self, parent):
    '''Append @ignore to parent.b so a failed import is never written back.'''
    c = self.c
    parent.b = parent.b.rstrip() + '\n@ignore\n'
    if g.unitTesting:
        # Record the failure location for unit tests.
        g.app.unitTestDict['fail'] = g.callers()
    elif parent.isAnyAtFileNode() and not parent.isAtAutoNode():
        g.warning('inserting @ignore')
        c.import_error_nodes.append(parent.h)
#@+node:ekr.20170123041745.84: *7* IC.post_pass
def post_pass(self, parent):
    '''
    Clean up parent's children after scanning: clean headlines,
    run language-specific cleanup, unindent bodies, delete empty nodes.
    '''
    # Clean the headlines.
    for p in parent.subtree():
        h = self.clean_headline(p.h)
        assert h
        if h != p.h: p.h = h
    # Clean the nodes, in a language-dependent way.
    if hasattr(self, 'clean_nodes'):
        # pylint: disable=no-member
        self.clean_nodes(parent)
    # Unindent nodes.
    for p in parent.subtree():
        if p.b.strip():
            p.b = self.undent(p)
        else:
            p.b = ''
    # Delete empty nodes.
    aList = []
    for p in parent.subtree():
        s = p.b
        back = p.threadBack()
        if not s.strip() and not p.isCloned() and back != parent:
            # Move the whitespace-only body to the previous node.
            back.b = back.b + s
            aList.append(p.copy())
    self.c.deletePositionsInList(aList)
#@+node:ekr.20170123041745.85: *7* IC.regularize_whitespace
def regularize_whitespace(self, s):
    '''
    Regularize leading whitespace in s:
    Convert tabs to blanks or vice versa depending on the @tabwidth in effect.

    Sets self.ws_error (later checked by check()) when any line changed.
    '''
    trace = False and not g.unitTesting
    trace_lines = False
    kind = 'tabs' if self.tab_width > 0 else 'blanks'
    kind2 = 'blanks' if self.tab_width > 0 else 'tabs'
    fn = g.shortFileName(self.root.h)
    lines = g.splitLines(s)
    count, result, tab_width = 0, [], self.tab_width
    if tab_width < 0: # Convert tabs to blanks.
        for n, line in enumerate(lines):
            i, w = g.skip_leading_ws_with_indent(line, 0, tab_width)
            s = g.computeLeadingWhitespace(w, -abs(tab_width)) + line[i:]
                # Use negative width.
            if s != line:
                count += 1
                if trace and trace_lines:
                    g.es_print('%s: %r\n%s: %r' % (n+1, line, n+1, s))
            result.append(s)
    elif tab_width > 0: # Convert blanks to tabs.
        for n, line in enumerate(lines):
            s = g.optimizeLeadingWhitespace(line, abs(tab_width))
                # Use positive width.
            if s != line:
                count += 1
                if trace and trace_lines:
                    g.es_print('%s: %r\n%s: %r' % (n+1, line, n+1, s))
            result.append(s)
    # NOTE(review): if tab_width == 0 neither branch runs, result stays
    # empty and the file's contents would be dropped.  Presumably
    # tab_width is never 0 here -- verify against c.getTabWidth in run().
    if count:
        self.ws_error = True # A flag to check.
        if not g.unitTesting:
            # g.es_print('Warning: Intermixed tabs and blanks in', fn)
            # g.es_print('Perfect import test will ignoring leading whitespace.')
            g.es_print('changed leading %s to %s in %s line%s in %s' % (
                kind2, kind, count, g.plural(count), fn))
        if g.unitTesting: # Sets flag for unit tests.
            self.report('changed %s lines' % count)
    return ''.join(result)
#@+node:ekr.20170123041745.86: *6* IC.Utils
#@+node:ekr.20170123041745.87: *7* IC.Common Utils
#@+node:ekr.20170123041745.88: *8* IC.get_str_lws
def get_str_lws(self, s):
    '''Return the leading whitespace of s, as a string.'''
    match = re.match(r'\s*', s)
    return '' if match is None else match.group(0)
#@+node:ekr.20170123041745.89: *8* IC.Messages
def error(self, s):
    '''Increment the error counts and report the error message s.'''
    self.errors += 1
    self.importCommands.errors += 1
    if g.unitTesting:
        # Record the first message and the running count for unit tests.
        if self.errors == 1:
            g.app.unitTestDict['actualErrorMessage'] = s
        g.app.unitTestDict['actualErrors'] = self.errors
    else:
        g.error('Error:', s)

def report(self, message):
    '''Treat message as an error in strict mode, as a warning otherwise.'''
    if self.strict:
        self.error(message)
    else:
        self.warning(message)

def warning(self, s):
    '''Issue a warning (suppressed during unit tests).'''
    if not g.unitTesting:
        g.warning('Warning:', s)
#@+node:ekr.20170123041745.90: *8* IC.undent & helper
def undent(self, p):
    '''
    Return p.b with the maximal common leading whitespace removed from
    all lines.  Never unindents rst nodes.
    '''
    trace = False and not g.unitTesting # and self.root.h.find('main') > -1
    lines = g.splitLines(p.b)
    if self.is_rst:
        return ''.join(lines) # Never unindent rst code.
    ws = self.common_lws(lines)
    if trace:
        g.trace('common_lws:', repr(ws))
        print('===== lines:\n%s' % ''.join(lines))
    result = []
    for s in lines:
        if s.startswith(ws):
            result.append(s[len(ws):])
        elif self.strict:
            # Indicate that the line is underindented.
            result.append("%s%s.%s" % (
                self.c.atFileCommands.underindentEscapeString,
                g.computeWidth(ws, self.tab_width),
                s.lstrip()))
        else:
            # Non-strict: just strip the underindented line fully.
            result.append(s.lstrip())
    if trace:
        print('----- result:\n%s' % ''.join(result))
    return ''.join(result)
#@+node:ekr.20170123041745.91: *9* IC.common_lws
def common_lws(self, lines):
    '''Return the longest leading whitespace common to all lines.'''
    trace = False and not g.unitTesting
    if not lines:
        return ''
    common = self.get_str_lws(lines[0])
    for line in lines:
        lws = self.get_str_lws(line)
        if lws.startswith(common):
            continue # This line is at least as indented.
        if common.startswith(lws):
            common = lws # Shorten the common prefix.
        else:
            common = '' # Nothing in common.
            break
    if trace: g.trace(repr(common), repr(lines[0]))
    return common
#@+node:ekr.20170123041745.92: *8* IC.underindented_comment/line
def underindented_comment(self, line):
    '''Warn (if the setting enables it) about an underindented python comment.'''
    if self.at_auto_warns_about_leading_whitespace:
        self.warning(
            'underindented python comments.\n' +
            'Extra leading whitespace will be added\n' + line)

def underindented_line(self, line):
    '''Report (if enabled) an error for an underindented non-comment line.'''
    if self.warn_about_underindented_lines:
        self.error(
            'underindented line.\n'
            'Extra leading whitespace will be added\n' + line)
#@+node:ekr.20170123041745.93: *7* IC.Utils (V1)
#@+node:ekr.20170123041745.94: *8* IC.create_child_node (V1)
def create_child_node(self, parent, body, headline):
    '''Create and return a new last child of parent with the given body and headline.'''
    trace = False and not g.unitTesting
    if trace: g.trace('\n\n%s === in === %s\n' % (headline, parent.h))
    p = parent.insertAsLastChild()
    assert g.isString(body), repr(body)
    assert g.isString(headline), repr(headline)
    p.b = g.u(body)
        # g.u: unicode conversion (Python 2/3 compatibility).
    p.h = g.u(headline)
    return p
#@+node:ekr.20170123041745.95: *8* IC.append_to_body (V1)
def append_to_body(self, p, s):
    '''
    Append s to p.b.

    Similar to c.appendStringToBody,
    but does not recolor the text or redraw the screen.
    '''
    assert g.isString(s), (repr(s), g.callers())
    assert g.isString(p.b), (repr(p.b), g.callers())
    p.b = p.b + s
#@+node:ekr.20170123041745.96: *6* IC.V1: Scanning & code generation
#@+node:ekr.20170123041745.97: *7* IC.gen_lines (entry) & helpers
def gen_lines(self, indent_flag, lines, parent, tag='top-level'):
    '''
    The entry point for parsing and code generation. Also called by
    rescan_code_block.

    Parse all lines, adding to parent.b, creating child nodes as necessary.
    '''
    trace = False and not g.unitTesting and self.root.h.endswith('.py')
    if not lines:
        return
    gen_refs, state = self.gen_refs, self.state
    if trace:
        g.trace(tag, state)
        print('===== entry lines:...')
        for line in lines:
            print(line.rstrip())
        print('----- end entry lines.')
    assert not state.context, state
        # The scanner must not be inside a string or comment on entry.
    i, ref_flag = 0, False
    while i < len(lines):
        progress = i
        line = lines[i]
        state.scan_line(line)
        if state.starts_block():
            # Generate the reference first.
            ref_flag = self.gen_ref(indent_flag, line, parent, ref_flag)
            # Scan the code block and its tail.
            code_lines, tail_lines = self.skip_code_block(i, lines)
            i += (len(code_lines) + len(tail_lines))
            if gen_refs:
                # Section references: the tail stays in the parent.
                self.rescan_code_block(code_lines, parent)
                self.append_to_body(parent, ''.join(tail_lines))
            else:
                # @others: the tail goes into the child node.
                code_lines.extend(tail_lines)
                self.rescan_code_block(code_lines, parent)
        else:
            # This works for both @others and section references.
            # After @others, child nodes contain *all* lines.
            if ref_flag and not gen_refs:
                g.trace('Can not happen: line not in tail: %r' % line)
            self.append_to_body(parent, line)
            i += 1
        assert progress < i
            # Guarantee loop termination.
#@+node:ekr.20170123041745.98: *8* IC.skip_code_block
def skip_code_block(self, i, lines):
    '''
    lines[i] starts a class or function.

    Return (code_lines, tail_lines) where:
    - code_lines are all the lines of the class or function.
    - tail lines are all lines up to but not including the next class or function.
    '''
    trace = False and not g.unitTesting and self.root.h.endswith('.py')
    trace_lines = False
    trace_entry = True
    trace_results = True
    state = self.state
    assert state.starts_block()
    assert not state.context, state
    # Save the enclosing block's base state and start a fresh one.
    state.push()
    state.clear()
    if trace and trace_entry:
        g.trace(state)
        print('===== entry lines:...')
        for j in range(i, len(lines)):
            print('  %s' % lines[j].rstrip()) # entry lines.
        print('----- end entry lines')
    # Scan the code block.
    # We have cleared the state, so rescan the first line.
    block_i = i
    state.scan_line(lines[i])
    assert state.starts_block()
    i += 1
    while i < len(lines):
        progress = i
        line = lines[i]
        if trace and trace_lines:
            g.trace(state, line.rstrip())
        state.scan_line(line)
        if state.continues_block():
            i += 1
        elif self.name == 'python':
            # Python: the terminating line is *not* part of the block.
            break
        else:
            # Brace languages: include the terminating (closing) line.
            i += 1
            break
        assert progress < i
    code_lines = lines[block_i:i]
    if trace and trace_results:
        g.trace('===== code lines:...')
        for line in code_lines:
            print('  %s' % line.rstrip()) # code lines.
        print('----- end code lines')
    # Scan the block's tail.
    # Line i is *not* part of the code block and it has not been scanned.
    tail_i = i
    if i < len(lines):
        state.scan_line(lines[i])
        if not state.starts_block():
            i += 1 # Add the just-scanned line.
            while i < len(lines):
                line = lines[i]
                if trace and trace_lines: g.trace(line.rstrip())
                state.scan_line(line)
                if state.starts_block():
                    break
                else:
                    i += 1
    tail_lines = lines[tail_i:i]
    if trace and trace_results:
        g.trace('===== tail lines:...')
        for line in tail_lines:
            print('  %s' % line.rstrip()) # tail lines.
        print('----- end-tail-lines')
    # Restore the enclosing block's base state.
    state.pop()
    return code_lines, tail_lines
#@+node:ekr.20170123041745.99: *7* IC.gen_ref
def gen_ref(self, indent_flag, line, parent, ref_flag):
    '''
    Append an @others directive or a section reference to parent.b for a
    line that starts a block.

    Return the new ref_flag: True once an @others has been generated, so
    only one @others ever appears in a body.

    Bug fix: the rst-and-not-atAuto branch returned the *tuple*
    (None, None).  The caller (gen_lines) assigns the result directly to
    ref_flag, and a tuple is truthy, so a later @others would wrongly be
    suppressed.  Return ref_flag unchanged instead.
    '''
    trace = False and not g.unitTesting
    indent_ws = self.get_str_lws(line)
        # Ignore indent_flag: Hurray!
    if self.is_rst and not self.atAuto:
        return ref_flag
    elif self.gen_refs:
        headline = self.clean_headline(line)
        ref = '%s%s\n' % (
            indent_ws,
            g.angleBrackets(' %s ' % headline))
    else:
        ref = None if ref_flag else '%s@others\n' % indent_ws
        ref_flag = True # Don't generate another @others.
    if ref:
        if trace: g.trace('%s indent_ws: %r line: %r parent: %s' % (
            '*' * 20, indent_ws, line, parent.h))
        self.append_to_body(parent, ref)
    return ref_flag
#@+node:ekr.20170123041745.100: *7* IC.v1_scan
def v1_scan(self, s, parent, parse_body=False):
    '''
    v1_scan: V1 of line-based scanners and code generators.

    Create a child of self.root for:
    - Leading outer-level declarations.
    - Outer-level classes.
    - Outer-level functions.
    '''
    # Create the initial body text in the root.
    if parse_body:
        pass # parse_body is not supported in V1.
    else:
        # Prepend the @language/@tabwidth directives, then generate nodes.
        self.append_to_body(parent,
            '@language %s\n@tabwidth %d\n' % (
                self.language,
                self.tab_width))
        self.gen_lines(
            indent_flag = False,
            lines = g.splitlines(s),
            parent = parent,
        )
#@+node:ekr.20170123041745.101: *7* IC.rescan_code_block (calls gen_lines)
def rescan_code_block(self, lines, parent):
    '''
    Create a child of parent holding the given block of lines, then
    recursively rescan the block's interior with gen_lines.
    '''
    if not lines:
        return
    state = self.state
    first_line = lines[0]
    assert first_line.strip(), repr(first_line)
        # Bug fix: the original asserted the bound *method*
        # first_line.strip (always truthy).  The intended check is that
        # the first line of a block is not blank.
    headline = self.clean_headline(first_line)
    if self.gen_refs:
        headline = g.angleBrackets(' %s ' % headline)
    child = self.create_child_node(
        parent,
        body = '',
        headline = headline)
    if self.name == 'python':
        # Python blocks have no closing line.
        last_line = None
        lines = lines[1:]
    else:
        # Brace languages: the last line closes the block.
        last_line = lines[-1]
        lines = lines[1:-1]
    self.append_to_body(child, first_line)
    state.push()
    self.gen_lines(
        indent_flag = True,
        lines = lines,
        parent = child,
        tag = 'rescan_code_block')
    state.pop()
    if last_line:
        self.append_to_body(child, last_line)
#@+node:ekr.20170123041745.102: *5* class LineScanner
class LineScanner(object):
    '''
    A class to scan lines.

    Subclasses override *both* the scan_line and v2_scan_line methods.
    These methods work with the various X_ScanState classes.
    '''
    # `@others` is a Leo directive: the scanner.* methods defined in
    # child nodes are spliced into the class body when Leo writes the file.

    @others
#@+node:ekr.20170123041745.103: *6* scanner.__init__ & __repr__
def __init__(self, c, language=None):
    '''Ctor for the LineScanner class.'''
    self.c = c
    self.language = language
    self.comment_delims = g.set_delims_from_language(language) if language else None
        # For general_line_scanner
    self.tab_width = c.tab_width
    self.context = '' # Represents cross-line constructs (strings/comments).
    self.base_curlies = self.curlies = 0 # Brace nesting: base level vs. current.
    self.stack = [] # Saved base_curlies values; see push/pop.

def __repr__(self):
    '''LineScanner.__repr__'''
    return 'LineScanner: base: %r now: %r context: %2r' % (
        '{' * self.base_curlies,
        '{' * self.curlies, self.context)

__str__ = __repr__
#@+node:ekr.20170123041745.104: *6* scanner.get_int_lws
def get_int_lws(self, s):
    '''Return the width (an int) of the leading whitespace of line s.'''
    return g.computeLeadingWhitespaceWidth(s, self.c.tab_width)
#@+node:ekr.20170123041745.105: *6* scanner.match
def match(self, s, i, pattern):
    '''Return True if pattern matches s at position i.'''
    window = s[i:i + len(pattern)]
    return window == pattern
#@+node:ekr.20170123041745.106: *6* V1 methods
#@+node:ekr.20170123041745.107: *7* scanner.clear, push & pop (V1)
def clear(self):
    '''Reset the scan state: no braces seen, no cross-line context.'''
    self.curlies = 0
    self.base_curlies = 0
    self.context = ''

def pop(self):
    '''Restore the most recently pushed base state.'''
    self.base_curlies = self.stack.pop()

def push(self):
    '''Save the base state; the current brace level becomes the new base.'''
    self.stack.append(self.base_curlies)
    self.base_curlies = self.curlies
#@+node:ekr.20170123041745.108: *7* scanner.scan_line (V1)
def scan_line(self, s):
    '''
    A *typical* line scanner. Subclasses should redefine this method.

    Updates self.context and self.curlies from the characters of line s.
    Commented-out code illustrates how to handle block comments.
    '''
    trace = False and not g.unitTesting
    contexts = strings = ['"', "'"]
    # match = self.match
    # block1, block2 = '/*', '*/'
    # contexts.append(block1)
    i = 0
    while i < len(s):
        progress = i
        ch = s[i]
        if self.context:
            # Inside a string (or, if enabled, a block comment).
            assert self.context in contexts, repr(self.context)
            if ch == '\\':
                i += 1 # Eat the next character later.
            elif self.context in strings and self.context == ch:
                self.context = '' # End the string.
            # elif self.context == block1 and match(s, i, block2):
                # self.context = '' # End the block comment.
                # i += (len(block2) - 1)
            else:
                pass # Eat the string character later.
        elif ch in strings:
            self.context = ch
        # elif match(s, i, block1):
            # self.context = block1
            # i += (len(block1) - 1)
        # elif match(s, i, line_comment):
        elif ch == '#':
            break # The single-line comment ends the line.
        elif ch == '{': self.curlies += 1
        elif ch == '}': self.curlies -= 1
        i += 1
        assert progress < i
    if trace:
        g.trace(self, s.rstrip())
#@+node:ekr.20170123041745.109: *7* scanner.continues_block and starts_block (V1)
def continues_block(self):
    '''Return True (or the truthy context) if the just-scanned line belongs to the inner block.'''
    if self.context:
        return self.context
    return self.curlies > self.base_curlies

def starts_block(self):
    '''Return True if the just-scanned line starts an inner block.'''
    if self.context:
        return False
    return self.curlies > self.base_curlies
#@+node:ekr.20170123041842.1: *4* COPY importers/python.py
'''The new, line-based, @auto importer for Python.'''
# py--lint: disable=no-name-in-module
# basescanner does not exist now.
NEW = True
    # True: Use new python scanner.
    # False: Use legacy python scanner, and BaseScanner class.
import re
import leo.core.leoGlobals as g
if NEW:
    import leo.plugins.importers.linescanner as linescanner
    Importer = linescanner.Importer
    Target = linescanner.Target
else:
    import leo.plugins.importers.basescanner as basescanner
    BaseScanner = basescanner.BaseScanner
    
@others
# Leo's registration table for this importer plugin.
importer_dict = {
    'class': Py_Importer if NEW else PythonScanner,
    'extensions': ['.py', '.pyw', '.pyi'],
        # mypy uses .pyi extension.
}
#@+node:ekr.20170123041842.2: *5* class Py_Importer(Importer)
if NEW:

    class Py_Importer(Importer):
        '''A class to store and update scanning state.'''

        def __init__(self, importCommands, atAuto, language=None, alternate_language=None):
            '''Py_Importer.ctor.'''
            # Init the base class.
            # NOTE(review): the language and alternate_language args are
            # accepted but ignored; the base class is always inited with
            # language='python'.  Presumably kept for signature
            # compatibility with other importers -- verify.
            Importer.__init__(self,
                importCommands,
                atAuto=atAuto,
                language='python',
                state_class = Python_ScanState,
                strict=True,
            )

        @others
#@+node:ekr.20170123041842.3: *6* py_i.clean_headline
def clean_headline(self, s):
    '''
    Return a cleaned-up headline for line s:
    the bare name for a def, 'class Name' for a class,
    the stripped line otherwise.
    '''
    for pattern, fmt in (
        (r'\s*def\s+(\w+)', '%s'),
        (r'\s*class\s+(\w+)', 'class %s'),
    ):
        m = re.match(pattern, s)
        if m:
            return fmt % m.group(1)
    return s.strip()
#@+node:ekr.20170123041842.4: *6* py_i.get_new_dict
@nobeautify

def get_new_dict(self, context):
    '''
    Return a *general* state dictionary for the given context.
    Subclasses may override...

    The dict maps a first character to a list of scanning rules; each
    rule is (kind, pattern, new-context[, deltas]) consumed by the
    linescanner's scan_line machinery.
    '''
    trace = False and g.unitTesting
    comment, block1, block2 = self.single_comment, self.block1, self.block2

    def add_key(d, key, data):
        # Append a rule to the list for this first character.
        aList = d.get(key,[])
        aList.append(data)
        d[key] = aList

    if context:
        # In some context: only rules that can *end* the context matter.
        d = {
            # key   kind    pattern ends?
            '\\':   [('len+1', '\\',None),],
            '"':[
                    ('len', '"""',  context == '"""'),
                    ('len', '"',    context == '"'),
                ],
            "'":[
                    ('len', "'''",  context == "'''"),
                    ('len', "'",    context == "'"),
                ],
        }
        if block1 and block2:
            # NOTE(review): keyed on block2[0] but the pattern is block1;
            # presumably the pattern should be block2 (the comment
            # terminator) -- verify against linescanner.Importer.
            add_key(d, block2[0], ('len', block1, True))
    else:
        # Not in any context.
        d = {
            # key    kind pattern new-ctx  deltas
            '\\': [('len+1','\\', context, None),],
            '#':  [('all', '#',   context, None),],
            '"':[
                    # order matters.
                    ('len', '"""',  '"""', None),
                    ('len', '"',    '"',   None),
                ],
            "'":[
                    # order matters.
                    ('len', "'''",  "'''", None),
                    ('len', "'",    "'",   None),
                ],
            '{':    [('len', '{', context, (1,0,0)),],
            '}':    [('len', '}', context, (-1,0,0)),],
            '(':    [('len', '(', context, (0,1,0)),],
            ')':    [('len', ')', context, (0,-1,0)),],
            '[':    [('len', '[', context, (0,0,1)),],
            ']':    [('len', ']', context, (0,0,-1)),],
        }
        if comment:
            add_key(d, comment[0], ('all', comment, '', None))
        if block1 and block2:
            add_key(d, block1[0], ('len', block1, block1, None))
    if trace: g.trace('created %s dict for %r state ' % (self.name, context))
    return d
#@+node:ekr.20170123041842.5: *6* py_i.gen_lines & overrides
def gen_lines(self, s, parent):
    '''
    Non-recursively parse all lines of s into parent, creating descendant
    nodes as needed.
    '''
    trace = False # and g.unitTesting
    tail_p = None
    prev_state = self.state_class()
    target = PythonTarget(parent, prev_state)
    stack = [target, target]
        # Duplicate guard entry; see cut_stack.
    self.inject_lines_ivar(parent)
    lines = g.splitLines(s)
    self.skip = 0 # Number of lines the block starter asked us to skip.
    first = True # True until the first non-whitespace line is seen.
    for i, line in enumerate(lines):
        new_state = self.scan_line(line, prev_state)
        top = stack[-1]
        if trace: self.trace_status(line, new_state, prev_state, stack, top)
        if self.skip > 0:
            self.skip -= 1
        elif self.starts_block(i, lines, new_state, prev_state, stack):
            first = False
            tail_p = None
            self.start_new_block(i, lines, new_state, prev_state, stack)
        elif first:
            if self.is_ws_line(line):
                # Leading blank lines stay where they fall.
                p = tail_p or top.p
                self.add_line(p, line)
            else:
                # First real line: open the Declarations node.
                first = False
                h = 'Declarations'
                self.gen_ref(line, parent, target)
                p = self.create_child_node(parent, body=line, headline=h)
                stack.append(PythonTarget(p, new_state))
        elif self.ends_block(line, new_state, prev_state, stack):
            first = False
            tail_p = self.end_block(i, lines, new_state, prev_state, stack)
        else:
            p = tail_p or top.p
            self.add_line(p, line)
        prev_state = new_state
#@+node:ekr.20170123041842.6: *7* python_i.common_lws
def common_lws(self, lines):
    '''Return the leading whitespace (a string) common to all lines.

    Deliberately, only the first line is examined: the class/def line
    must be unindented fully, so the indentation of other lines is
    irrelevant here.
    '''
    if not lines:
        return ''
    return self.get_str_lws(lines[0]) or ''
#@+node:ekr.20170123041842.7: *7* python_i.cut_stack
def cut_stack(self, new_state, stack, append=False):
    '''Cut back the stack until stack[-1] matches new_state.

    The stack always retains at least two entries (a guard plus the
    working target). When append is True the node at the matching level
    is kept so following lines are appended to it; otherwise it is
    popped so a new sibling node can be created.
    '''
    # pylint: disable=arguments-differ
    trace = False # and g.unitTesting
    if trace:
        g.trace(new_state)
        g.printList(stack)
    assert len(stack) > 1 # Fail on entry.
    while stack:
        top_state = stack[-1].state
        if new_state.level() < top_state.level():
            # The top target is too deep: pop it and keep searching.
            if trace: g.trace('new_state < top_state', top_state)
            assert len(stack) > 1, stack # <
            stack.pop()
        elif top_state.level() == new_state.level():
            if trace: g.trace('new_state == top_state', top_state)
            assert len(stack) > 1, stack # ==
            if append:
                pass # Append line to the previous node.
            else:
                stack.pop() # Create a new node.
            break
        else:
            # This happens often in valid Python programs.
            if trace: g.trace('new_state > top_state', top_state)
            break
    # Restore the guard entry if necessary.
    if len(stack) == 1:
        if trace: g.trace('RECOPY:', stack)
        stack.append(stack[-1])
    assert len(stack) > 1 # Fail on exit.
    if trace: g.trace('new target.p:', stack[-1].p.h)
#@+node:ekr.20170123041842.8: *7* python_i.end_block
def end_block(self, i, lines, new_state, prev_state, stack):
    '''
    Handle a line that terminates the previous class/def. The line is
    not itself a class/def line, and we are not in a multi-line token.

    Skip all lines that are at the same level as the class/def,
    incrementing self.skip so gen_lines does not add them again.
    Return the node that received the tail lines.
    '''
    # pylint: disable=arguments-differ
    top = stack[-1]
    assert new_state.indent < top.state.indent, (
        '\nnew: %s\ntop: %s' % (new_state, top.state))
    assert self.skip == 0, self.skip
    end_indent = new_state.indent
    while i < len(lines):
        progress = i
        # append=True: reuse the node at the matching level as the target.
        self.cut_stack(new_state, stack, append=True)
        top = stack[-1]
        # Add the line.
        line = lines[i]
        self.add_line(top.p, line)
        # Move to the next line.
        i += 1
        if i >= len(lines):
            break
        prev_state = new_state
        # NOTE(review): this scans the line just added (not lines[i]),
        # yielding the state *after* that line for the next tests — confirm
        # this ordering is intended before reusing this code.
        new_state = self.scan_line(line, prev_state)
        if self.starts_block(i, lines, new_state, prev_state, stack):
            break
        elif not self.is_ws_line(line) and new_state.indent <= end_indent:
            break
        else:
            self.skip += 1 # Tell gen_lines not to add the next line again.
        assert progress < i, repr(line)
    return top.p
#@+node:ekr.20170123041842.9: *7* python_i.ends_block
def ends_block(self, line, new_state, prev_state, stack):
    '''True if line ends the block.'''
    # Comparing new_state against prev_state does not work for python.
    if line.isspace():
        return False # Blank lines never end a block.
    if prev_state.in_context():
        return False # Still inside a string, bracket, or continuation.
    # *Any* underindented non-blank line ends the class/def.
    top = stack[-1]
    return new_state.level() < top.state.level()
#@+node:ekr.20170123041842.10: *7* python_i.gen_ref
def gen_ref(self, line, parent, target):
    '''
    Ensure parent's body contains an @others directive for this target,
    then return the cleaned headline for line. The directive is added at
    most once per target, tracked by target.at_others_flag.
    '''
    trace = False # and g.unitTesting
    indent_ws = self.get_str_lws(line)
    h = self.clean_headline(line)
    if target.at_others_flag:
        return h
    target.at_others_flag = True
    ref = '%s@others\n' % indent_ws
    if trace:
        g.trace('indent_ws: %r line: %r parent: %s' % (
            indent_ws, line, parent.h))
        g.printList(self.get_lines(parent))
    self.add_line(parent, ref)
    return h
#@+node:ekr.20170123041842.11: *7* python_i.move_decorators & helpers
def move_decorators(self, new_p, prev_p):
    '''
    Move decorators from the end of prev_p to the start of new_state.p.
    These lines may be on the other side of @others.
    '''
    if new_p.v == prev_p.v:
        return # Same underlying node: nothing to move.
    prev_lines = self.get_lines(prev_p)
    new_lines = self.get_lines(new_p)
    moved_lines = []
    # Temporarily remove a trailing @others so decorators just above it
    # are visible at the end of the list.
    if prev_lines and self.is_at_others(prev_lines[-1]):
        at_others_line = prev_lines.pop()
    else:
        at_others_line = None
    # Collect trailing decorator lines, last first.
    while prev_lines:
        line = prev_lines[-1]
        if self.is_decorator(line):
            prev_lines.pop()
            moved_lines.append(line)
        else:
            break
    if at_others_line:
        prev_lines.append(at_others_line) # Restore @others.
    # NOTE(review): prev_p is updated only via in-place mutation of
    # prev_lines — this assumes get_lines returns the live list; confirm.
    if moved_lines:
        # Re-reverse to restore original order, then prepend to new_p.
        self.set_lines(new_p, list(reversed(moved_lines)) + new_lines)
#@+node:ekr.20170123041842.12: *8* def python_i.is_at_others/is_decorator
at_others_pattern = re.compile(r'^\s*@others$')
    # Matches a bare @others directive, optionally indented.

def is_at_others(self, line):
    '''True if line is @others.'''
    # Return a real bool, as the docstring promises, instead of a
    # Match-or-None. All callers use the result truthily, so this is
    # backward compatible.
    return bool(self.at_others_pattern.match(line))

decorator_pattern = re.compile(r'^\s*@(.*)$')
    # Matches any @-line; group(1) is checked against Leo's directives.

def is_decorator(self, line):
    '''True if line is a python decorator, not a Leo directive.'''
    m = self.decorator_pattern.match(line)
    # Same normalization: bool, not None/False.
    return bool(m and m.group(1) not in g.globalDirectiveList)
#@+node:ekr.20170123041842.13: *7* python_i.promote_last_lines
def promote_last_lines(self, parent):
    '''python_i.promote_last_lines: deliberately a no-op for Python.'''
    # The base-class behavior is unwanted for this importer.
#@+node:ekr.20170123041842.14: *7* python_i.promote_trailing_underindented_lines
def promote_trailing_underindented_lines(self, parent):
    '''
    Promote all trailing underindent lines to the node's parent node,
    deleting one tab's worth of indentation. Typically, this will remove
    the underindent escape.
    '''
    trace = False
        # Fix: this debug flag was left True, tracing every promotion.
    pattern = self.escape_pattern # A compiled regex pattern
    for p in parent.subtree():
        lines = self.get_lines(p)
        tail = []
        # Strip matching escape lines off the end of the node, last first.
        while lines:
            line = lines[-1]
            m = pattern.match(line)
            if not m:
                break
            lines.pop()
            n_str = m.group(1)
            try:
                n = int(n_str)
            except ValueError:
                break
            if n == abs(self.tab_width):
                # Remove the escape prefix, keeping the rest of the line.
                new_line = line[len(m.group(0)):]
                tail.append(new_line)
            else:
                g.trace('unexpected unindent value', n)
                break
        if tail:
            if trace:
                # NOTE(review): traces the *outer* parent, which is rebound
                # just below; preserved from the original for reference.
                g.trace(parent.h)
                g.printList(reversed(tail))
            parent = p.parent()
            self.set_lines(p, lines)
            self.extend_lines(parent, reversed(tail))
#@+node:ekr.20170123041842.15: *7* python_i.start_new_block
def start_new_block(self, i, lines, new_state, prev_state, stack):
    '''Create a child node and update the stack.'''
    trace = False # and g.unitTesting
    assert not prev_state.in_context(), prev_state
    line = lines[i]
    top = stack[-1]
    prev_p = top.p.copy() # The previous node: it may end with decorators.
    if trace:
        g.trace('line', repr(line))
        g.trace('top_state', top.state)
        g.trace('new_state', new_state)
        g.printList(stack)
    # Adjust the stack.
    if new_state.indent > top.state.indent:
        pass # Deeper: the new node becomes a child of top.
    elif new_state.indent == top.state.indent:
        stack.pop() # Same level: the new node replaces top as a sibling.
    else:
        self.cut_stack(new_state, stack) # Dedent: cut back to the proper level.
    # Create the child.
    top = stack[-1]
    parent = top.p
    self.gen_ref(line, parent, top)
    h = self.clean_headline(line) 
    child = self.create_child_node(parent, line, h)
    target = PythonTarget(child, new_state)
    target.kind = 'class' if h.startswith('class') else 'def'
    stack.append(target)
    # Handle previous decorators.
    new_p = stack[-1].p.copy()
    self.move_decorators(new_p, prev_p)
#@+node:ekr.20170123041842.16: *7* python_i.starts_block
starts_pattern = re.compile(r'\s*(class|def)\s+')
    # Matches lines that apparently start a class or def.

def starts_block(self, i, lines, new_state, prev_state, stack):
    '''True if the line startswith class or def outside any context.'''
    # pylint: disable=arguments-differ
    trace = False # and not g.unitTesting
    if prev_state.in_context():
        return False # Inside a string, bracket, or backslash continuation.
    line = lines[i]
    m = self.starts_pattern.match(line)
    if not m:
        return False
    top = stack[-1]
    prev_indent = top.state.indent
    if top.kind == 'None' and new_state.indent > 0:
        # Underindented top-level class/def.
        return False
    elif top.kind == 'def' and new_state.indent > prev_indent:
        # class/def within a def.
        return False
    elif top.at_others_flag and new_state.indent > prev_indent:
        # Deeper than a target that already generated @others:
        # treat the line as body text.
        return False
    else:
        if trace and new_state.indent > prev_indent:
            g.trace(prev_indent, new_state.indent, repr(line))
            g.trace('@others', top.at_others_flag)
        return True
#@+node:ekr.20170123041842.17: *6* py_i.find_class & helper
def find_class(self, parent):
    '''
    Find the start and end of a class/def in a node.

    Return (kind, i, j), where kind in (None, 'class', 'def') and i, j
    are character offsets into parent.b delimiting the block.
    '''
    trace = True and not g.unitTesting
    prev_state = Python_ScanState()
    target = Target(parent, prev_state)
    stack = [target, target]
    lines = g.splitlines(parent.b)
    index = 0 # Character offset of the start of the current line.
    for i, line in enumerate(lines):
        new_state = self.scan_line(line, prev_state)
        if trace: g.trace(new_state)
        # Fix: starts_block requires the stack argument; the original
        # call omitted it, raising TypeError when reached.
        if self.starts_block(i, lines, new_state, prev_state, stack):
            return self.skip_block(i, index, lines, new_state, stack)
        prev_state = new_state
        index += len(line)
    return None, -1, -1
#@+node:ekr.20170123041842.18: *7* py_i.skip_block
def skip_block(self, i, index, lines, prev_state, stack):
    '''
    Find the end of a class/def starting at character offset index
    on line i of lines.

    Return (kind, i, j), where kind in (None, 'class', 'def') and the
    two ints are character offsets delimiting the block.
    '''
    trace = True and not g.unitTesting
    index1 = index # Offset of the start of the block.
    line = lines[i]
    kind = 'class' if line.strip().startswith('class') else 'def'
    i += 1
    while i < len(lines):
        progress = i
        line = lines[i]
        index += len(line)
        new_state = self.scan_line(line, prev_state)
        top = stack[-1]
        if trace: g.trace('new level', new_state.level(), 'line', line)
        # Similar to self.ends_block: any non-blank line at or below the
        # block's level, outside any context, ends the block.
        if (not self.is_ws_line(line) and
            not prev_state.in_context() and
            new_state.level() <= top.state.level()
        ):
            return kind, index1, index
        prev_state = new_state
        i += 1
        assert progress < i
    return None, -1, -1
#@+node:ekr.20170123041842.19: *5* class Python_ScanState
class Python_ScanState:
    '''A class representing the state of the python line-oriented scan.'''
    
    def __init__(self, d=None):
        '''Python_ScanState ctor.

        d, when given, is {'indent': int, 'prev': Python_ScanState}:
        the new state inherits the previous state's context and bracket
        counts. A backslash-newline in prev preserves prev's indentation.
        '''
        if d:
            indent = d.get('indent')
            prev = d.get('prev')
            self.bs_nl = False
                # Fix: this branch never set bs_nl, so __repr__ or
                # in_context could raise AttributeError before update()
                # ran. update() normally overwrites this immediately.
            self.indent = prev.indent if prev.bs_nl else indent
            self.context = prev.context
            self.curlies = prev.curlies
            self.parens = prev.parens
            self.squares = prev.squares
        else:
            self.bs_nl = False
            self.context = ''
            self.curlies = self.parens = self.squares = 0
            self.indent = 0

    @others
#@+node:ekr.20170123041842.20: *6* py_state.__repr__
def __repr__(self):
    '''Py_State.__repr__: a compact one-line summary for tracing.

    Shows context, indent, then {curlies} (parens) [squares] and the
    backslash-newline flag as 0/1.
    '''
    return 'PyState: %7r indent: %2s {%s} (%s) [%s] bs-nl: %s'  % (
        self.context, self.indent,
        self.curlies, self.parens, self.squares,
        int(self.bs_nl))

__str__ = __repr__
#@+node:ekr.20170123041842.21: *6* py_state.level
def level(self):
    '''Python_ScanState.level.

    For Python, indentation alone determines the nesting level.
    '''
    return self.indent
#@+node:ekr.20170123041842.22: *6* py_state.in_context
def in_context(self):
    '''True if the scan is inside a string/comment context, inside any
    kind of bracket, or after a backslash-newline continuation.'''
    # context stays first so a non-empty context string is what callers
    # see as the truthy value, exactly as before.
    return (
        self.context or
        self.bs_nl or
        self.curlies > 0 or
        self.parens > 0 or
        self.squares > 0
    )
#@+node:ekr.20170123041842.23: *6* py_state.update
def update(self, data):
    '''
    Update the state from the 6-tuple returned by i.scan_line.
    Return the new scan index, data[1].
    '''
    context, i, delta_c, delta_p, delta_s, bs_nl = data
    # The context and bs-nl flag are replaced; bracket counts accumulate.
    self.context, self.bs_nl = context, bs_nl
    self.curlies = self.curlies + delta_c
    self.parens = self.parens + delta_p
    self.squares = self.squares + delta_s
    return i

#@+node:ekr.20170123041842.24: *5* class PythonTarget
class PythonTarget:
    '''
    Describes a target node p for generated lines.
    state is the scan state used to cut back the stack.
    '''

    def __init__(self, p, state):
        '''Ctor for PythonTarget.'''
        self.at_others_flag = False # True: @others has been generated for this target.
        self.kind = 'None' # One of 'None', 'class', 'def'.
        self.p = p # The target position.
        self.state = state # The scan state in effect at p.

    def __repr__(self):
        fields = (
            self.state,
            self.kind,
            int(self.at_others_flag),
            g.shortFileName(self.p.h),
        )
        return 'PyTarget: %s kind: %s @others: %s p: %s' % fields
#@+node:ekr.20170123041842.25: *5* class PythonScanner (Legacy)
# Legacy scanner class, used only when the NEW importer is disabled.
if not NEW:
    
    class PythonScanner(basescanner.BaseScanner):

        # @others is a Leo directive: method nodes are inserted here by Leo.
        @others
#@+node:ekr.20170123041842.26: *6* __init__
def __init__(self, importCommands, atAuto):
    '''Ctor for the legacy PythonScanner class.'''
    # Init the base class.
    basescanner.BaseScanner.__init__(self, importCommands, atAuto=atAuto, language='python')
    # Set the parser delims.
    self.lineCommentDelim = '#'
    self.classTags = ['class',]
    self.functionTags = ['def',]
    self.ignoreBlankLines = True
    self.blockDelim1 = self.blockDelim2 = None
        # Suppress the check for the block delim.
        # The check is done in skipSigTail.
    self.strict = True # Python is whitespace-sensitive.

#@+node:ekr.20170123041842.27: *6* adjustDefStart
def adjustDefStart(self, s, i):
    '''A hook to allow the Python importer to adjust the
    start of a class or function to include decorators.

    Scan backward one line at a time from the class/def line, extending
    the start over contiguous decorator lines. Leo directives stop the
    scan. On any assertion failure, fall back to returning i.
    '''
    # Invariant: i does not change.
    # Invariant: start is the present return value.
    try:
        assert s[i] != '\n'
        start = j = g.find_line_start(s, i) if i > 0 else 0
        # g.trace('entry',j,i,repr(s[j:i+10]))
        assert j == 0 or s[j - 1] == '\n'
        while j > 0:
            progress = j
            # Step back to the start of the previous line.
            j1 = j = g.find_line_start(s, j - 2)
            # g.trace('line',repr(s[j:progress]))
            j = g.skip_ws(s, j)
            if not g.match(s, j, '@'):
                break # Not an @-line: stop the scan.
            k = g.skip_id(s, j + 1)
            word = s[j: k]
            # Leo directives halt the scan.
            if word and word in g.globalDirectiveList:
                break
            # A decorator.
            start = j = j1
            assert j < progress
        # g.trace('**returns %s, %s' % (repr(s[start:i]),repr(s[i:i+20])))
        return start
    except AssertionError:
        g.es_exception()
        return i

#@+node:ekr.20170123041842.28: *6* extendSignature
def extendSignature(self, s, i):
    '''Extend the text to be added to the class node following the signature.

    The text *must* end with a newline.'''
    # Add a docstring to the class node,
    # And everything on the line following it
    j = g.skip_ws_and_nl(s, i)
    if g.match(s, j, '"""') or g.match(s, j, "'''"):
        j = g.skip_python_string(s, j)
        if j < len(s): # No scanning error.
            # Return the docstring only if nothing but whitespace follows.
            j = g.skip_ws(s, j)
            if g.is_nl(s, j):
                return j + 1
    # No docstring (or a scanning error): leave the extension point alone.
    return i

#@+node:ekr.20170123041842.29: *6* findClass
def findClass(self, p):
    '''Return (kind, sigStart, codeEnd) for the class or def found in
    node p, or (None, -1, -1) if none is found.'''
    s, i = p.b, 0
    while i < len(s):
        progress = i
        if s[i] in (' ', '\t', '\n'):
            i += 1
        elif self.startsComment(s, i):
            i = self.skipComment(s, i)
        elif self.startsString(s, i):
            i = self.skipString(s, i)
        elif self.startsClass(s, i):
            # sigStart/codeEnd are presumably set by startsClass /
            # startsFunction — confirm in the BaseScanner implementation.
            return 'class', self.sigStart, self.codeEnd
        elif self.startsFunction(s, i):
            return 'def', self.sigStart, self.codeEnd
        elif self.startsId(s, i):
            i = self.skipId(s, i)
        else:
            i += 1
        assert progress < i, 'i: %d, ch: %s' % (i, repr(s[i]))
    return None, -1, -1

#@+node:ekr.20170123041842.30: *6* skipCodeBlock
def skipCodeBlock(self, s, i, kind):
    '''Skip the body of a class/def whose signature has been scanned.
    Return (i, ok): the index just past the block and a success flag.'''
    trace = False; verbose = True
    # if trace: g.trace('***',g.callers())
    startIndent = self.startSigIndent
    if trace: g.trace('startIndent', startIndent)
    assert startIndent is not None
    i = start = g.skip_ws_and_nl(s, i)
    parenCount = 0
    underIndentedStart = None # The start of trailing underindented blank or comment lines.
    while i < len(s):
        progress = i
        ch = s[i]
        if g.is_nl(s, i):
            if trace and verbose: g.trace(g.get_line(s, i))
            backslashNewline = (i > 0 and g.match(s, i - 1, '\\\n'))
            if backslashNewline:
                # An underindented line, including docstring,
                # does not end the code block.
                i += 1 # 2010/11/01
            else:
                i = g.skip_nl(s, i)
                j = g.skip_ws(s, i)
                if g.is_nl(s, j):
                    pass # We have already made progress.
                else:
                    # The helper decides whether this newline ends the block.
                    i, underIndentedStart, breakFlag = self.pythonNewlineHelper(
                        s, i, parenCount, startIndent, underIndentedStart)
                    if breakFlag: break
        elif ch == '#':
            i = g.skip_to_end_of_line(s, i)
        elif ch == '"' or ch == '\'':
            i = g.skip_python_string(s, i)
        elif ch in '[{(':
            i += 1; parenCount += 1
            # g.trace('ch',ch,parenCount)
        elif ch in ']})':
            i += 1; parenCount -= 1
            # g.trace('ch',ch,parenCount)
        else: i += 1
        assert(progress < i)
    # The actual end of the block.
    if underIndentedStart is not None:
        i = underIndentedStart
        if trace: g.trace('***backtracking to underindent range')
        if trace: g.trace(g.get_line(s, i))
    if 0 < i < len(s) and not g.match(s, i - 1, '\n'):
        g.trace('Can not happen: Python block does not end in a newline.')
        g.trace(g.get_line(s, i))
        return i, False
    # 2010/02/19: Include all following material
    # until the next 'def' or 'class'
    i = self.skipToTheNextClassOrFunction(s, i, startIndent)
    if (trace or self.trace) and s[start: i].strip():
        g.trace('%s returns\n' % (kind) + s[start: i])
    return i, True

#@+node:ekr.20170123041842.31: *6* pythonNewlineHelper
def pythonNewlineHelper(self, s, i, parenCount, startIndent, underIndentedStart):
    '''Handle a newline while skipping a Python code block.

    Return (i, underIndentedStart, breakFlag): the possibly-advanced
    index, the updated start of the trailing underindented range (or
    None), and True when the newline ends the block.
    '''
    trace = False
    breakFlag = False
    j, indent = g.skip_leading_ws_with_indent(s, i, self.tab_width)
    if trace: g.trace(
        'startIndent', startIndent, 'indent', indent, 'parenCount', parenCount,
        'line', repr(g.get_line(s, j)))
    if indent <= startIndent and parenCount == 0:
        # An underindented line: it ends the block *unless*
        # it is a blank or comment line or (2008/9/1) the end of a triple-quoted string.
        if g.match(s, j, '#'):
            if trace: g.trace('underindent: comment')
            if underIndentedStart is None: underIndentedStart = i
            i = j
        elif g.match(s, j, '\n'):
            if trace: g.trace('underindent: blank line')
            # Blank lines never start the range of underindented lines.
            i = j
        else:
            if trace: g.trace('underindent: end of block')
            breakFlag = True # The actual end of the block.
    else:
        if underIndentedStart and g.match(s, j, '\n'):
            # Add the blank line to the underindented range.
            if trace: g.trace('properly indented blank line extends underindent range')
        elif underIndentedStart and g.match(s, j, '#'):
            # Add the (properly indented!) comment line to the underindented range.
            if trace: g.trace('properly indented comment line extends underindent range')
        elif underIndentedStart is None:
            pass
        else:
            # A properly indented non-comment line.
            # Give a message for all underindented comments in underindented range.
            if trace: g.trace('properly indented line generates underindent errors')
            s2 = s[underIndentedStart: i]
            lines = g.splitlines(s2)
            for line in lines:
                if line.strip():
                    junk, indent = g.skip_leading_ws_with_indent(line, 0, self.tab_width)
                    if indent <= startIndent:
                        if j not in self.errorLines: # No error yet given.
                            self.errorLines.append(j)
                            self.underindentedComment(line)
            underIndentedStart = None
    if trace: g.trace('breakFlag', breakFlag, 'returns', i, 'underIndentedStart', underIndentedStart)
    return i, underIndentedStart, breakFlag

#@+node:ekr.20170123041842.32: *6* skipToTheNextClassOrFunction
def skipToTheNextClassOrFunction(self, s, i, lastIndent):
    '''Skip to the next python def or class.
    Return the original i if nothing more is found.
    This allows the "if __name__ == '__main__' hack
    to appear at the top level.'''
    # Deliberately disabled pending a rewrite: always return i unchanged.
    return i

# This must be overridden in order to handle newlines properly.

#@+node:ekr.20170123041842.33: *6* skipSigTail
def skipSigTail(self, s, i, kind):
    '''Skip from the end of the arg list to the start of the block.

    Return (i, ok): the index of the colon/newline/end and whether the
    expected colon was found.
    '''
    while i < len(s):
        ch = s[i]
        if ch == ':':
            return i, True # The block starts here.
        if ch == '\n':
            return i, False # End of line without a colon.
        if self.startsComment(s, i):
            i = self.skipComment(s, i)
        else:
            i += 1
    return i, False

#@+node:ekr.20170123041842.34: *6* skipString
def skipString(self, s, i):
    '''Skip the Python string starting at s[i].'''
    # Returns len(s) on unterminated string.
    return g.skip_python_string(s, i, verbose=False)

#@+node:ekr.20150312225028.127: *3* lengthHelper (LeoQTextEditWidget)
###
### Do not use this code. It is extremely slow for large text.
###
def lengthHelper(self):
    '''Return the length of the text.

    Extremely slow for large text (see the note above); retained here
    for reference only.
    '''
    traceTime = False and not g.unitTesting
    if traceTime: t1 = time.time()
    w = self.widget
    tc = w.textCursor()
    # Move a cursor to the end and read its offset.
    tc.movePosition(QtGui.QTextCursor.End)
    n = tc.position()
    if traceTime:
        delta_t = time.time()-t1
        if delta_t > 0.1: g.trace('=========== %2.3f sec' % (delta_t))
    return n
#@+node:ekr.20150312225028.29: *3* leoViews...
This was a major project, now abandoned.
#@+node:ekr.20150312225028.31: *4* class OrganizerData
class OrganizerData:
    '''A class containing all data for a particular organizer node.'''

    def __init__(self, h, unl, unls):
        '''Ctor for OrganizerData.'''
        # Identity.
        self.h = h # The headline of this od node.
        self.unl = unl # The unl of this od node.
        self.unls = unls # The unls contained in this od node.
        # Tree structure.
        self.anchor = None # The anchor position of this od node.
        self.children = [] # The direct child od nodes of this od node.
        self.descendants = None # The descendant od nodes of this od node.
        self.organized_nodes = [] # The list of positions organized by this od node.
        self.parent_od = None # The parent od node of this od node. (None is valid.)
        self.p = None # The position of this od node.
        self.parent = None # The original parent position of all nodes organized by this od node.
            # If parent_od is None, this will be the parent position of the od node.
        self.source_unl = None # The unl of self.parent.
        # State flags.
        self.closed = False # True: this od node no longer accepts new child od nodes.
        self.drop = True # Drop the unl for this od node when associating positions with unls.
        self.exists = False # True: this od was created by @existing-organizer:
        self.moved = False # True: the od node has been moved to a global move list.
        self.opened = False # True: the od node has been opened.
        self.visited = False # True: demote_helper has already handled this od node.

    def __repr__(self):
        return 'OrganizerData: %s' % (self.h or '<no headline>')

    __str__ = __repr__
#@+node:ekr.20150312225028.32: *4* class ViewController
# Abandoned @views controller shell. << docstring >> and @others are Leo
# section references, expanded by Leo when this node is written.
class ViewController:
    << docstring >>
    @others
#@+node:ekr.20150312225028.33: *5*  << docstring >> (class ViewController)
'''
A class to handle @views trees and related operations.
Such trees have the following structure:

- @views
  - @auto-view <unl of @auto node>
    - @organizers
      - @organizer <headline>
    - @clones
    
The body text of @organizer and @clones consists of unl's, one per line.
'''
#@+node:ekr.20150312225028.34: *5*  vc.ctor & vc.init
def __init__ (self,c):
    '''Ctor for ViewController class.'''
    self.c = c
    self.headline_ivar = '_imported_headline'
        # The vnode attribute holding the expected imported headline.
    self.init()
    
def init(self):
    '''
    Init all ivars of this class.
    Unit tests may call this method to ensure that this class is re-inited properly.
    '''
    self.all_ods = []
        # List of all od nodes.
    self.anchors_d = {}
        # Keys are anchoring positions, values are sorted lists of ods.
    self.anchor_offset_d = {}
        # Keys are anchoring positions, values are ints.
    self.existing_ods = []
        # List of od instances corresponding to @existing-organizer: nodes.
    self.global_bare_organizer_node_list = []
        # List of organizers that have no parent organizer node.
        # This list excludes existing organizer nodes.
    self.headlines_dict = {}
        # Keys are vnodes; values are list of child headlines.
    self.imported_organizers_list = []
        # The list of nodes that have children on entry, such as class nodes.
    self.n_nodes_scanned = 0
        # Number of nodes scanned by demote.
    self.organizer_ods = []
        # List of od instances corresponding to @organizer: nodes.
    self.organizer_unls = []
        # The list of od.unl for all od instances in self.organizer_ods.
    self.root = None
        # The position of the @auto node.
    self.pending = []
        # The list of nodes pending to be added to an organizer.
    self.stack = []
        # The stack containing real and virtual parent nodes during the main loop.
    self.temp_node = None
        # The parent position of all holding cells.
    self.trail_write_1 = None
        # The trial write on entry.
    self.views_node = None
        # The position of the @views node.
    self.work_list = []
        # A global list of (parent,child) tuples for all nodes that are
        # to be moved to **non-existing** organizer nodes.
        # **Important**: Nodes are moved in the order they appear in this list:
        # the tuples contain no childIndex component!
        # This list is the "backbone" of this class:
        # - The front end (demote and its helpers) adds items to this list.
        # - The back end (move_nodes and its helpers) moves nodes using this list.
#@+node:ekr.20150312225028.35: *5* vc.Entry points
#@+node:ekr.20150312225028.36: *6* vc.convert_at_file_to_at_auto
def convert_at_file_to_at_auto(self,root):
    '''Convert root, an @file node, to an @auto node via ConvertController.'''
    # Define class ConvertController.
    @others
    vc = self
    c = vc.c
    if root.isAtFileNode():
        ConvertController(c,root).run()
    else:
        g.es_print('not an @file node:',root.h)
#@+node:ekr.20150312225028.37: *7* class ConvertController
class ConvertController:
    '''Controller that converts an @file tree to an @auto tree; see cc.run.'''
    def __init__ (self,c,p):
        self.c = c
        # self.ic = c.importCommands
        self.vc = c.viewController # The ViewController.
        self.root = p.copy() # The @file node to convert.
    @others
#@+node:ekr.20150312225028.38: *8* cc.delete_at_auto_view_nodes
def delete_at_auto_view_nodes(self,root):
    '''Delete all @auto-view nodes pertaining to root.'''
    vc = self.vc
    # Keep asking for the next matching node until none remain.
    p = vc.has_at_auto_view_node(root)
    while p:
        p.doDelete()
        p = vc.has_at_auto_view_node(root)
#@+node:ekr.20150312225028.39: *8* cc.import_from_string
def import_from_string(self,s):
    '''Import from s into a temp outline.

    Return (ok, p): the scanner's success flag and the new node, headlined
    @auto on success or @@auto on failure.
    '''
    cc = self # (ConvertController)
    c = cc.c
    ic = c.importCommands
    root = cc.root
    language = g.scanForAtLanguage(c,root) 
    ext = '.'+g.app.language_extension_dict.get(language)
    scanner = ic.scanner_for_ext(ext)
    # g.trace(language,ext,scanner.__name__)
    p = root.insertAfter()
    ok = scanner(atAuto=True,parent=p,s=s)
    p.h = root.h.replace('@file','@auto' if ok else '@@auto')
    return ok,p
#@+node:ekr.20150312225028.40: *8* cc.run
def run(self):
    '''Convert an @file tree to @auto tree.

    Runs the conversion in phases (prepass, trial write, import, update),
    timing each phase and tracing the timings when trace is True. On
    failure the original @auto file is restored under an @@auto headline.
    '''
    trace = True and not g.unitTesting
    trace_s = False
    cc = self
    c = cc.c
    root,vc = cc.root,c.viewController
    # set the headline_ivar for all vnodes.
    t1 = time.clock()
    cc.set_expected_imported_headlines(root)
    t2 = time.clock()
    # Delete all previous @auto-view nodes for this tree.
    cc.delete_at_auto_view_nodes(root)
    t3 = time.clock()
    # Ensure that all nodes of the tree are regularized.
    ok = vc.prepass(root)
    t4 = time.clock()
    if not ok:
        g.es_print('Can not convert',root.h,color='red')
        if trace: g.trace(
            '\n  set_expected_imported_headlines: %4.2f sec' % (t2-t1),
            # '\n  delete_at_auto_view_nodes:     %4.2f sec' % (t3-t2),
            '\n  prepass:                         %4.2f sec' % (t4-t3),
            '\n  total:                           %4.2f sec' % (t4-t1))
        return
    # Create the appropriate @auto-view node.
    at_auto_view = vc.update_before_write_at_auto_file(root)
    t5 = time.clock()
    # Write the @file node as if it were an @auto node.
    s = cc.strip_sentinels()
    t6 = time.clock()
    if trace and trace_s:
        g.trace('source file...\n',s)
    # Import the @auto string.
    ok,p = cc.import_from_string(s)
    t7 = time.clock()
    if ok:
        # Change at_auto_view.b so it matches p.gnx.
        at_auto_view.b = vc.at_auto_view_body(p)
        # Recreate the organizer nodes, headlines, etc.
        ok = vc.update_after_read_at_auto_file(p)
        t8 = time.clock()
        if not ok:
            # Restore the original @auto file, marked @@auto (restored).
            p.h = '@@' + p.h
            g.trace('restoring original @auto file')
            ok,p = cc.import_from_string(s)
            if ok:
                p.h = '@@' + p.h + ' (restored)'
                if p.next():
                    p.moveAfter(p.next())
        t9 = time.clock()
    else:
        t8 = t9 = time.clock()
    if trace: g.trace(
        '\n  set_expected_imported_headlines: %4.2f sec' % (t2-t1),
        # '\n  delete_at_auto_view_nodes:     %4.2f sec' % (t3-t2),
        '\n  prepass:                         %4.2f sec' % (t4-t3),
        '\n  update_before_write_at_auto_file:%4.2f sec' % (t5-t4),
        '\n  strip_sentinels:                 %4.2f sec' % (t6-t5),
        '\n  import_from_string:              %4.2f sec' % (t7-t6),
        '\n  update_after_read_at_auto_file   %4.2f sec' % (t8-t7),
        '\n  import_from_string (restore)     %4.2f sec' % (t9-t8),
        '\n  total:                           %4.2f sec' % (t9-t1))
    if p:
        c.selectPosition(p)
    c.redraw()
#@+node:ekr.20150312225028.41: *8* cc.set_expected_imported_headlines
def set_expected_imported_headlines(self,root):
    '''
    Set the headline_ivar on every vnode in root's subtree to the headline
    the importer's scanner would have generated for that node.
    '''
    trace = False and not g.unitTesting
    cc = self
    c = cc.c
    ic = cc.c.importCommands
    language = g.scanForAtLanguage(c,root)
    ext = '.'+g.app.language_extension_dict.get(language)
    aClass = ic.classDispatchDict.get(ext)
    # Duplicate the fn logic from ic.createOutline.
    theDir = g.setDefaultDirectory(c,root,importing=True)
    fn = c.os_path_finalize_join(theDir,root.h)
    fn = root.h.replace('\\','/')
        # NOTE(review): this assignment discards the finalized path above;
        # preserved unchanged because only the basename is used below.
    junk,fn = g.os_path_split(fn)
    fn,junk = g.os_path_splitext(fn)
    # Bug fix: instantiate the scanner *only* if aClass exists.
    # The old code called aClass(...) unconditionally, raising
    # TypeError for extensions that have no importer class.
    if aClass:
        scanner = aClass(importCommands=ic,atAuto=True)
        if hasattr(scanner,'headlineForNode'):
            ivar = cc.vc.headline_ivar
            for p in root.subtree():
                if not hasattr(p.v,ivar):
                    h = scanner.headlineForNode(fn,p)
                    setattr(p.v,ivar,h)
                    if trace and h != p.h:
                        g.trace('==>',h) # p.h,'==>',h
#@+node:ekr.20150312225028.42: *8* cc.strip_sentinels
def strip_sentinels(self):
    '''
    Write the file to a string without headlines or sentinels.
    Return at.stringOutput, the string produced by the write.
    '''
    trace = False and not g.unitTesting
    cc = self
    at = cc.c.atFileCommands
    # ok = at.writeOneAtAutoNode(cc.root,
        # toString=True,force=True,trialWrite=True)
    # Write cc.root to at.stringOutput using @file conventions,
    # but suppressing sentinel lines.
    at.errors = 0
    at.write(cc.root,
        kind = '@file',
        nosentinels = True,
        perfectImportFlag = False,
        scriptWrite = False,
        thinFile = True,
        toString = True)
    # ok is used only by the trace below; the string is returned regardless.
    ok = at.errors == 0
    s = at.stringOutput
    if trace: g.trace('ok:',ok,'s:...\n'+s)
    return s
#@+node:ekr.20150312225028.43: *6* vc.pack & helper
def pack(self):
    '''
    Undoably convert c.p to a packed @view node, replacing all cloned
    children of c.p by unl lines in c.p.b.
    '''
    vc = self
    c,u = vc.c,vc.c.undoer
    vc.init()
    changed = False
    root = c.p
    # Create an undo group to handle changes to root and @views nodes.
    # Important: creating the @views node does *not* invalidate any positions.
    u.beforeChangeGroup(root,'view-pack')
    if not vc.has_at_views_node():
        changed = True
        bunch = u.beforeInsertNode(c.rootPosition())
        views = vc.find_at_views_node()
            # Creates the @views node as the *last* top-level node
            # so that no positions become invalid as a result.
        u.afterInsertNode(views,'create-views-node',bunch)
    # Prepend @view if needed, so the headline marks this as a view node.
    if not root.h.strip().startswith('@'):
        changed = True
        bunch = u.beforeChangeNodeContents(root)
        root.h = '@view ' + root.h.strip()
        u.afterChangeNodeContents(root,'view-pack-update-headline',bunch)
    # Create an @view node as a clone of the @views node.
    bunch = u.beforeInsertNode(c.rootPosition())
    new_clone = vc.create_view_node(root)
    if new_clone:
        changed = True
        u.afterInsertNode(new_clone,'create-view-node',bunch)
    # Create a list of clones that have a representative node
    # outside of the root's tree.
    reps = [vc.find_representative_node(root,p)
        for p in root.children()
            if vc.is_cloned_outside_parent_tree(p)]
    reps = [z for z in reps if z is not None]
    if reps:
        changed = True
        bunch = u.beforeChangeTree(root)
        c.setChanged(True)
        # Prepend a unl: line for each cloned child.
        unls = ['unl: %s\n' % (vc.unl(p)) for p in reps]
        root.b = ''.join(unls) + root.b
        # Delete all child clones in the reps list.
        # The while/for/else restarts the scan after every deletion,
        # because deleting while iterating would skip children.
        v_reps = set([p.v for p in reps])
        while True:
            for child in root.children():
                if child.v in v_reps:
                    child.doDelete()
                    break
            else: break
        u.afterChangeTree(root,'view-pack-tree',bunch)
    if changed:
        u.afterChangeGroup(root,'view-pack')
        c.selectPosition(root)
        c.redraw()
#@+node:ekr.20150312225028.44: *7* vc.create_view_node
def create_view_node(self,root):
    '''
    Create a clone of root as a child of the @views node.
    Return the *newly* cloned node, or None if it already exists.
    '''
    vc = self
    c = vc.c
    views = vc.find_at_views_node()
    # Do nothing if some child of @views already shares c.p's vnode.
    if any(child.v == c.p.v for child in views.children()):
        return None
    clone = root.clone()
    clone.moveToLastChildOf(views)
    return clone
#@+node:ekr.20150312225028.45: *6* vc.unpack
def unpack(self):
    '''
    Undoably unpack nodes corresponding to leading unl lines in c.p to child clones.
    Return True if the outline has, in fact, been changed.
    '''
    vc = self
    c = vc.c
    root,u = c.p,c.undoer
    vc.init()
    tag = 'unl:'
    lines = g.splitLines(root.b)
    # Count the leading unl: lines of the body.
    n = 0
    while n < len(lines) and lines[n].startswith(tag):
        n += 1
    if n == 0:
        return False
    bunch = u.beforeChangeTree(root)
    # Restore the body, dropping the unl: lines.
    root.b = ''.join(lines[n:])
    # Create a clone for each unique unl.
    for unl in set(line[len(tag):].strip() for line in lines[:n]):
        p = vc.find_absolute_unl_node(unl)
        if p:
            p.clone().moveToLastChildOf(root)
        else:
            g.trace('not found: %s' % (unl))
    c.setChanged(True)
    c.undoer.afterChangeTree(root,'view-unpack',bunch)
    c.redraw()
    return True
#@+node:ekr.20150312225028.46: *6* vc.update_before_write_at_auto_file
def update_before_write_at_auto_file(self,root):
    '''
    Update the @auto-view node for root, an @auto node. Create @organizer,
    @existing-organizer, @clones and @headlines nodes as needed.
    This *must not* be called for trial writes.
    Return the @auto-view node, for the at-file-to-at-auto command.
    '''
    trace = False and not g.unitTesting
    vc = self
    c = vc.c
    changed = False
    t1 = time.clock()
    # Create lists of cloned and organizer nodes.
    clones,existing_organizers,organizers = \
        vc.find_special_nodes(root)
    # Delete all children of the @auto-view node for this @auto node.
    at_auto_view = vc.find_at_auto_view_node(root)
    if at_auto_view.hasChildren():
        changed = True
        at_auto_view.deleteAllChildren()
    # Create the single @clones node.
    if clones:
        at_clones = vc.find_at_clones_node(root)
        at_clones.b = ''.join(
            ['gnx: %s\nunl: %s\n' % (z[0],z[1]) for z in clones])
    # Create the single @organizers node.
    # Note: at_organizers is bound only here, but the two loops below
    # iterate non-empty lists only when this condition was True.
    if organizers or existing_organizers:
        at_organizers = vc.find_at_organizers_node(root)
    # Create one @organizers: node for each organizer node.
    for p in organizers:
        # g.trace('organizer',p.h)
        at_organizer = at_organizers.insertAsLastChild()
        at_organizer.h = '@organizer: %s' % p.h
        # The organizer node's unl is implicit in each child's unl.
        at_organizer.b = '\n'.join([
            'unl: '+vc.relative_unl(z,root) for z in p.children()])
    # Create one @existing-organizer node for each existing organizer.
    # Prefer the original imported headline saved on the vnode, if any.
    ivar = vc.headline_ivar
    for p in existing_organizers:
        at_organizer = at_organizers.insertAsLastChild()
        h = getattr(p.v,ivar,p.h)
        if trace and h != p.h: g.trace('==>',h) # p.h,'==>',h
        at_organizer.h = '@existing-organizer: %s' % h
        # The organizer node's unl is implicit in each child's unl.
        at_organizer.b = '\n'.join([
            'unl: '+vc.relative_unl(z,root) for z in p.children()])
    # Create the single @headlines node.
    vc.create_at_headlines(root)
    if changed and not g.unitTesting:
        g.es_print('updated @views node in %4.2f sec.' % (
            time.clock()-t1))
    if changed:
        c.redraw()
    return at_auto_view # For at-file-to-at-auto command.
#@+node:ekr.20150312225028.47: *7* vc.create_at_headlines
def create_at_headlines(self,root):
    '''Create the @headlines node for root, an @auto file.'''
    vc = self
    ivar = vc.headline_ivar
    entries = []
    for p in root.subtree():
        imported_h = getattr(p.v,ivar,None)
        if imported_h is None or p.h == imported_h:
            continue
        # Replace the last unl component with the imported headline.
        parts = vc.relative_unl(p,root).split('-->')
        parts[-1] = imported_h
        entries.append('imported unl: %s\nhead: %s\n' % (
            '-->'.join(parts),p.h))
        # The saved headline has been recorded: remove it from the vnode.
        delattr(p.v,ivar)
    if entries:
        vc.find_at_headlines_node(root).b = ''.join(entries)
#@+node:ekr.20150312225028.48: *7* vc.find_special_nodes
def find_special_nodes(self,root):
    '''
    Scan root's tree, looking for organizer and cloned nodes.
    Exclude organizers on the imported organizers list.
    Return (clones, existing_organizers, organizers).
    '''
    vc = self
    clones = []
    existing_organizers = []
    organizers = []
    for p in root.subtree():
        if p.isCloned():
            # Record (gnx, unl) for clones with a representative
            # node outside root's tree.
            rep = vc.find_representative_node(root,p)
            if rep:
                clones.append((rep.v.gnx,vc.relative_unl(p,root)))
        if p.v in vc.imported_organizers_list:
            # The importer created this node's children: ignore it.
            pass
        elif vc.is_organizer_node(p,root):
            # p.hasChildren and p.b is empty, except for comments.
            organizers.append(p.copy())
        elif p.hasChildren():
            existing_organizers.append(p.copy())
    return clones,existing_organizers,organizers
#@+node:ekr.20150312225028.49: *6* vc.update_after_read_at_auto_file & helpers
def update_after_read_at_auto_file(self,root):
    '''
    Recreate all organizer nodes and clones for a single @auto node
    using the corresponding @organizer: and @clones nodes.
    Return True if the recreated outline passes the perfect-import check.
    '''
    trace = True and not g.unitTesting
    vc = self
    c = vc.c
    if not vc.is_at_auto_node(root):
        return # Not an error: it might be and @auto-rst node.
    old_changed = c.isChanged()
    # Bug fix: bind all timing marks *before* the try block.
    # The old code bound t1..t7 inside try, so an early exception left
    # them unbound and the reporting code below raised NameError.
    t1 = time.clock()
    t2 = t3 = t4 = t5 = t6 = t7 = t1
    n,ok = 0,False
    try:
        vc.init()
        vc.root = root.copy()
        vc.trial_write_1 = vc.trial_write(root)
        t2 = time.clock()
        at_organizers = vc.has_at_organizers_node(root)
        t3 = time.clock()
        if at_organizers:
            vc.create_organizer_nodes(at_organizers,root)
        t4 = time.clock()
        at_clones = vc.has_at_clones_node(root)
        if at_clones:
            vc.create_clone_links(at_clones,root)
        t5 = time.clock()
        n = len(vc.work_list)
        ok = vc.check(root)
        t6 = time.clock()
        if ok:
            vc.update_headlines_after_read(root)
        t7 = time.clock()
        c.setChanged(old_changed if ok else False)
            ### To do: revert if not ok.
    except Exception:
        g.es_exception()
        n = 0
        ok = False
    if trace and t7-t1 > 0.5:
        g.trace(
            '\n  trial_write:                 %4.2f sec' % (t2-t1),
            # '\n  has_at_organizers_node:    %4.2f sec' % (t3-t2),
            '\n  create_organizer_nodes:      %4.2f sec' % (t4-t3),
            '\n  create_clone_links:          %4.2f sec' % (t5-t4),
            '\n  check:                       %4.2f sec' % (t6-t5),
            '\n  update_headlines_after_read: %4.2f sec' % (t7-t6),
            '\n  total:                       %4.2f sec' % (t7-t1))
            # '\n  file:',root.h)
    if ok and n > 0:
        # Bug fixes: spell 'rearranged' correctly and report the elapsed
        # time t7-t1 (the old code printed the raw timestamp t2).
        g.es('rearranged: %s' % (root.h),color='blue')
        g.es('moved %s nodes in %4.2f sec.' % (n,t7-t1))
        g.trace('@auto-view moved %s nodes in %4.2f sec. for' % (
            n,t7-t1),root.h,noname=True)
    c.selectPosition(root)
    c.redraw()
    return ok
#@+node:ekr.20150312225028.50: *7* vc.check
def check (self,root):
    '''
    Compare a trial write of root with vc.trial_write_1, the trial write
    made before reorganizing nodes.
    Unlike the perfect-import checks done by the importer,
    we expect an *exact* match, regardless of language.
    Return True if the two trial writes are identical.
    '''
    trace = True # and not g.unitTesting
    vc = self
    trial1 = vc.trial_write_1
    trial2 = vc.trial_write(root)
    if trial1 != trial2:
        g.pr('') # Don't use print: it does not appear with the traces.
        g.es_print('perfect import check failed for:',color='red')
        g.es_print(root.h,color='red')
        if trace:
            # Show a line-by-line comparison of the two trial writes.
            vc.compare_trial_writes(trial1,trial2)
            g.pr('')
    return trial1 == trial2
#@+node:ekr.20150312225028.51: *7* vc.create_clone_link
def create_clone_link(self,gnx,root,unl):
    '''
    Replace the node in the @auto tree with the given unl by a
    clone of the node outside the @auto tree with the given gnx.
    Return True if the relink succeeded.
    '''
    vc = self
    p1 = vc.find_position_for_relative_unl(root,unl)
    p2 = vc.find_gnx_node(gnx)
    if not (p1 and p2):
        # Either endpoint of the link is missing.
        return False
    if p1.b != p2.b:
        g.es('body text mismatch in relinked node',p1.h)
        return False
    # Both nodes exist and agree: make p2 a clone of p1.
    p2._relinkAsCloneOf(p1)
    return True
#@+node:ekr.20150312225028.52: *7* vc.create_clone_links
def create_clone_links(self,at_clones,root):
    '''
    Recreate clone links from an @clones node.
    @clones nodes contain pairs of lines (gnx,unl).
    Return True only if *every* clone link was recreated.
    '''
    vc = self
    lines = g.splitLines(at_clones.b)
    gnxs = [s[4:].strip() for s in lines if s.startswith('gnx:')]
    unls = [s[4:].strip() for s in lines if s.startswith('unl:')]
    if len(gnxs) == len(unls):
        vc.headlines_dict = {} # May be out of date.
        ok = True
        for gnx,unl in zip(gnxs,unls):
            # Bug fix: attempt *every* link. The old code
            # (ok = ok and vc.create_clone_link(...)) short-circuited
            # after the first failure, skipping the remaining links.
            ok = vc.create_clone_link(gnx,root,unl) and ok
        return ok
    else:
        g.trace('bad @clones contents',gnxs,unls)
        return False
#@+node:ekr.20150312225028.53: *7* vc.create_organizer_nodes & helpers
def create_organizer_nodes(self,at_organizers,root):
    '''
    root is an @auto node. Create an organizer node in root's tree for each
    child @organizer: node of the given @organizers node.
    '''
    trace = False and not g.unitTesting
    vc = self
    c = vc.c
    t1 = time.clock()
    # Merge comment nodes with the next node.
    vc.pre_move_comments(root)
    t2 = time.clock()
    # Init all data required for reading.
    vc.precompute_all_data(at_organizers,root)
    t3 = time.clock()
    # Traverse root's tree, adding nodes to vc.work_list.
    vc.demote(root)
    t4 = time.clock()
    # Move nodes on vc.work_list to their final locations.
    vc.move_nodes()
    t5 = time.clock()
    # Move merged comments to parent organizer nodes.
    vc.post_move_comments(root)
    t6 = time.clock()
    if trace: g.trace(
        '\n  pre_move_comments:   %4.2f sec' % (t2-t1),
        '\n  precompute_all_data: %4.2f sec' % (t3-t2),
        '\n  demote:              %4.2f sec' % (t4-t3),
        '\n  move_nodes:          %4.2f sec' % (t5-t4),
        '\n  post_move_comments:  %4.2f sec' % (t6-t5))
#@+node:ekr.20150312225028.54: *7* vc.update_headlines_after_read
def update_headlines_after_read(self,root):
    '''Handle custom headlines for all imported nodes.'''
    vc = self
    # Remember the original imported headline on each vnode.
    ivar = vc.headline_ivar
    for p in root.subtree():
        if not hasattr(p.v,ivar):
            setattr(p.v,ivar,p.h)
    # Parse the @headlines node, if any, into parallel lists.
    at_headlines = vc.has_at_headlines_node(root)
    tag1,tag2 = 'imported unl: ','head: '
    if at_headlines:
        lines = g.splitLines(at_headlines.b)
        unls = [s[len(tag1):].strip() for s in lines if s.startswith(tag1)]
        heads = [s[len(tag2):].strip() for s in lines if s.startswith(tag2)]
    else:
        unls,heads = [],[]
    if len(unls) != len(heads):
        g.trace('bad @headlines body',at_headlines.b)
        return
    vc.headlines_dict = {} # May be out of date.
    # Apply each recorded custom headline to the node at its unl.
    for unl,head in zip(unls,heads):
        p = vc.find_position_for_relative_unl(root,unl)
        if p:
            p.h = head
#@+node:ekr.20150312225028.55: *5* vc.Main Lines
#@+node:ekr.20150312225028.56: *6* vc.precompute_all_data & helpers
def precompute_all_data(self,at_organizers,root):
    '''Precompute all data needed to reorganize nodes.'''
    trace = False and not g.unitTesting
    vc = self
    t1 = time.clock()
    # Put all nodes with children on vc.imported_organizers_list.
    vc.find_imported_organizer_nodes(root)
    t2 = time.clock()
    # Create OrganizerData objects for all @organizer:
    # and @existing-organizer: nodes.
    vc.create_organizer_data(at_organizers,root)
    t3 = time.clock()
    # Create the organizer nodes in holding cells so positions remain valid.
    vc.create_actual_organizer_nodes()
    t4 = time.clock()
    # Set od.parent_od, od.children & od.descendants for all ods.
    vc.create_tree_structure(root)
    t5 = time.clock()
    # Compute the positions organized by each organizer.
    # ** Most of the time is spent here **.
    vc.compute_all_organized_positions(root)
    t6 = time.clock()
    # Create the dictionary that associates positions with ods.
    vc.create_anchors_d()
    t7 = time.clock()
    if trace: g.trace(
        '\n  find_imported_organizer_nodes:   %4.2f sec' % (t2-t1),
        '\n  create_organizer_data:           %4.2f sec' % (t3-t2),
        '\n  create_actual_organizer_nodes:   %4.2f sec' % (t4-t3),
        '\n  create_tree_structure:           %4.2f sec' % (t5-t4),
        '\n  compute_all_organized_positions: %4.2f sec' % (t6-t5),
        '\n  create_anchors_d:                %4.2f sec' % (t7-t6))
#@+node:ekr.20150312225028.57: *7* 1: vc.find_imported_organizer_nodes
def find_imported_organizer_nodes(self,root):
    '''
    Put the VNode of all imported nodes with children on
    vc.imported_organizers_list, without duplicates.
    '''
    vc = self
    vc.imported_organizers_list = list(set(
        p.v for p in root.subtree() if p.hasChildren()))
#@+node:ekr.20150312225028.58: *7* 2: vc.create_organizer_data (od.p & od.parent)
def create_organizer_data(self,at_organizers,root):
    '''
    Create OrganizerData nodes for all @organizer: and @existing-organizer:
    nodes in the given @organizers node.
    '''
    vc = self
    vc.create_ods(at_organizers)
    for finish in (vc.finish_create_organizers,
                   vc.finish_create_existing_organizers):
        finish(root)
    for od in vc.all_ods:
        # The finish_* helpers remove any od without a parent position.
        assert od.parent,(od.exists,od.h)
#@+node:ekr.20150312225028.59: *8* vc.create_ods
def create_ods(self,at_organizers):
    '''Create all organizer nodes and the associated lists.'''
    # Important: we must completely reinit all data here.
    vc = self
    vc.all_ods,vc.existing_ods,vc.organizer_ods = [],[],[]
    tags = ('@organizer:','@existing-organizer:')
    for at_organizer in at_organizers.children():
        h = at_organizer.h
        for tag in tags:
            if not h.startswith(tag):
                continue
            unls = vc.get_at_organizer_unls(at_organizer)
            if not unls:
                g.trace('===== no unls:',at_organizer.h)
                continue
            organizer_unl = vc.drop_unl_tail(unls[0])
            # Strip the tag from h before creating the od.
            h = h[len(tag):].strip()
            od = OrganizerData(h,organizer_unl,unls)
            vc.all_ods.append(od)
            if tag == tags[0]:
                vc.organizer_ods.append(od)
                vc.organizer_unls.append(organizer_unl)
            else:
                vc.existing_ods.append(od)
                # Do *not* append organizer_unl to the unl list.
#@+node:ekr.20150312225028.60: *8* vc.finish_create_organizers
def finish_create_organizers(self,root):
    '''
    Finish creating all organizers: resolve each od's source unl to a
    parent position, or remove the od from all lists when resolution fails.
    '''
    trace = False # and not g.unitTesting
    vc = self
    # Careful: we may delete items from this list.
    for od in vc.organizer_ods[:]: 
        od.source_unl = vc.source_unl(vc.organizer_unls,od.unl)
        od.parent = vc.find_position_for_relative_unl(root,od.source_unl)
        if od.parent:
            # The organizer anchors at its parent position.
            od.anchor = od.parent
            if trace: g.trace(od.h,
                # '\n  exists:',od.exists,
                # '\n  unl:',od.unl,
                # '\n  source (unl):',od.source_unl or repr(''),
                # '\n  anchor (pos):',od.anchor.h,
                # '\n  parent (pos):',od.parent.h,
            )
        else:
            # This is, most likely, a true error.
            g.trace('===== removing od:',od.h)
            vc.organizer_ods.remove(od)
            vc.all_ods.remove(od)
            assert od not in vc.existing_ods
            assert od not in vc.all_ods
#@+node:ekr.20150312225028.61: *8* vc.finish_create_existing_organizers
def finish_create_existing_organizers(self,root):
    '''
    Finish creating existing organizer nodes: resolve each od's source unl
    to a position, or remove the od from all lists when resolution fails.
    '''
    trace = False # and not g.unitTesting
    vc = self
    # Careful: we may delete items from this list.
    for od in vc.existing_ods[:]:
        od.exists = True
        assert od.unl not in vc.organizer_unls
        od.source_unl = vc.source_unl(vc.organizer_unls,od.unl)
        od.p = vc.find_position_for_relative_unl(root,od.source_unl)
        if od.p:
            # An existing organizer anchors at its own position.
            od.anchor = od.p
            assert od.p.h == od.h,(od.p.h,od.h)  
            od.parent = od.p # Here, od.parent represents the "source" p.
            if trace: g.trace(od.h,
                # '\n  exists:',od.exists,
                # '\n  unl:',od.unl,
                # '\n  source (unl):',od.source_unl or repr(''),
                # '\n  anchor (pos):',od.anchor.h,
                # '\n  parent (pos):',od.parent.h,
            )
        else:
            # This arises when the imported node name doesn't match.
            g.trace('===== removing existing organizer:',od.h)
            vc.existing_ods.remove(od)
            vc.all_ods.remove(od)
            assert od not in vc.existing_ods
            assert od not in vc.all_ods

#@+node:ekr.20150312225028.62: *7* 3: vc.create_actual_organizer_nodes
def create_actual_organizer_nodes(self):
    '''
    Create all organizer nodes as children of holding cells. These holding
    cells ensure that moving an organizer node leaves all other positions
    unchanged.
    '''
    vc = self
    c = vc.c
    # The temp node is the parent of all holding cells.
    temp = vc.temp_node = c.lastTopLevel().insertAfter()
    temp.h = 'ViewController.temp_node'
    for od in vc.organizer_ods:
        # One holding cell per organizer; the organizer is its only child.
        cell = temp.insertAsLastChild()
        cell.h = 'holding cell for ' + od.h
        od.p = cell.insertAsLastChild()
        od.p.h = od.h
#@+node:ekr.20150312225028.63: *7* 4: vc.create_tree_structure & helper
def create_tree_structure(self,root):
    '''Set od.parent_od, od.children & od.descendants for all ods.'''
    trace = False and not g.unitTesting
    vc = self
    # if trace: g.trace([z.h for z in data_list],g.callers())
    # Link ods whose unls name other ods: they form a parent/child pair.
    organizer_unls = [z.unl for z in vc.all_ods]
    for od in vc.all_ods:
        for unl in od.unls:
            if unl in organizer_unls:
                i = organizer_unls.index(unl)
                d2 = vc.all_ods[i]
                # if trace: g.trace('found organizer unl:',od.h,'==>',d2.h)
                od.children.append(d2)
                d2.parent_od = od
    # create_organizer_data now ensures od.parent is set.
    for od in vc.all_ods:
        assert od.parent,od.h
    # Extend the descendant lists.
    for od in vc.all_ods:
        vc.compute_descendants(od)
        assert od.descendants is not None
    if trace:
        # Trace helper: show unls relative to the od's own unl.
        def tail(head,unl):
            return str(unl[len(head):]) if unl.startswith(head) else str(unl)
        for od in vc.all_ods:
            g.trace(
                '\n  od:',od.h,
                '\n  unl:',od.unl,
                '\n  unls:', [tail(od.unl,z) for z in od.unls],
                '\n  source (unl):',od.source_unl or repr(''),
                '\n  parent (pos):', od.parent.h,
                '\n  children:',[z.h for z in od.children],
                '\n  descendants:',[str(z.h) for z in od.descendants])
#@+node:ekr.20150312225028.64: *8* vc.compute_descendants
def compute_descendants(self,od,level=0,result=None):
    '''
    Compute the descendant od nodes of od.
    Return the list of descendants, caching it in od.descendants
    at the top level of the recursion.
    '''
    trace = False # and not g.unitTesting
    vc = self
    if level == 0:
        # Fresh accumulator at the top of the recursion, so the
        # mutable default is never shared between calls.
        result = []
    if od.descendants is None:
        for child in od.children:
            result.append(child)
            # NOTE(review): the recursive call both mutates result and
            # returns it, so the extend adds duplicates; the set() pass
            # below removes them (losing ordering).
            result.extend(vc.compute_descendants(child,level+1,result))
            result = list(set(result))
        if level == 0:
            od.descendants = result
            if trace: g.trace(od.h,[z.h for z in result])
        return result
    else:
        # Use the cached descendant list.
        if trace: g.trace('cached',od.h,[z.h for z in od.descendants])
        return od.descendants
#@+node:ekr.20150312225028.65: *7* 5: vc.compute_all_organized_positions
def compute_all_organized_positions(self,root):
    '''
    Compute od.organized_nodes, the list of positions organized by each od.
    '''
    trace = False and not g.unitTesting
    vc = self
    # Dead-code cleanup: the old 'if True: ### parent:' scaffolding and
    # its unreachable else branch have been removed; behavior is unchanged.
    for od in vc.all_ods:
        for unl in od.unls:
            # A full search per unl: this is the dominant cost here.
            p = vc.find_position_for_relative_unl(root,unl)
            if p:
                od.organized_nodes.append(p.copy())
            if trace: g.trace('exists:',od.exists,
                'od:',od.h,'unl:',unl,
                'p:',p and p.h or '===== None')
#@+node:ekr.20150312225028.66: *7* 6: vc.create_anchors_d
def create_anchors_d (self):
    '''
    Create vc.anchors_d.
    Keys are positions, values are lists of ods having that anchor.
    '''
    vc = self
    d = {}
    for od in vc.all_ods:
        # Valid now that p.__hash__ exists: positions can be dict keys.
        d.setdefault(od.anchor,[]).append(od)
    vc.anchors_d = d
#@+node:ekr.20150312225028.67: *6* vc.demote & helpers
def demote(self,root):
    '''
    The main line of the @auto-view algorithm. Traverse root's entire tree,
    placing items on the global work list.
    '''
    trace = False # and not g.unitTesting
    trace_loop = True
    vc = self
    active = None # The active od.
    vc.pending = [] # Lists of pending demotions.
    d = vc.anchor_offset_d # For traces.
    for p in root.subtree():
        parent = p.parent()
        if trace and trace_loop:
            if 1:
                g.trace('-----',p.childIndex(),p.h)
            else:
                g.trace(
                    '=====\np:',p.h,
                    'childIndex',p.childIndex(),
                    '\nparent:',parent.h,
                    'parent:offset',d.get(parent,0))
        vc.n_nodes_scanned += 1
        # Close any organizers that do not organize p's parent.
        vc.terminate_organizers(active,parent)
        found = vc.find_organizer(parent,p)
        if found:
            pass ### vc.enter_organizers(found,p)
        else:
            pass ### vc.terminate_all_open_organizers()
        if trace and trace_loop:
            g.trace(
                'active:',active and active.h or 'None',
                'found:',found and found.h or 'None')
        # The main case statement...
        if found is None and active:
            # No organizer for p: defer the decision.
            vc.add_to_pending(active,p)
        elif found is None and not active:
            # Pending nodes will *not* be organized.
            vc.clear_pending(None,p)
        elif found and found == active:
            # Pending nodes *will* be organized.
            for z in vc.pending:
                active2,child2 = z
                vc.add(active2,child2,'found==active:pending')
            vc.pending = []
            vc.add(active,p,'found==active')
        elif found and found != active:
            # A new organizer takes over.
            # Pending nodes will *not* be organized.
            vc.clear_pending(found,p)
            active = found
            vc.enter_organizers(found,p)
            vc.add(active,p,'found!=active')
        else: assert False,'can not happen'
#@+node:ekr.20150312225028.68: *7* vc.add
def add(self,active,p,tag):
    '''
    Add p, an existing (imported) node to the global work list.
    Subtract 1 from the vc.anchor_offset_d entry for p.parent().

    Exception: do *nothing* if p is a child of an existing organizer node.
    '''
    vc = self
    parent = p.parent()
    if active.exists and active.p == parent:
        # p is already a child of an existing organizer node.
        return
    vc.add_to_work_list((active.p,p.copy()),tag)
    vc.anchor_decr(anchor=parent,p=p)
        
#@+node:ekr.20150312225028.69: *7* vc.add_organizer_node
def add_organizer_node (self,od,p):
    '''
    Add od to the appropriate move list.
    p is the existing node that caused od to be added.
    '''
    trace = True # and not g.unitTesting
    verbose = False
    vc = self
    # g.trace(od.h,'parent',od.parent_od and od.parent_od.h or 'None')
    if od.parent_od:
        # Not a bare organizer: a child of another organizer node.
        # If this is an existing organizer, it's *position* may have
        # been moved without active.moved being set.
        data = od.parent_od.p,od.p
        if data in vc.work_list:
            # Already scheduled: mark it moved instead of re-adding.
            if trace and verbose: g.trace(
                '**** duplicate 1: setting moved bit.',od.h)
            od.moved = True
        elif od.parent_od.exists:    
            # The parent organizer already exists in the outline:
            # schedule od on the bare list at a computed child index.
            anchor = od.parent_od.p
            n = vc.anchor_incr(anchor,p) + p.childIndex()
            data = anchor,od.p,n
            # g.trace('anchor:',anchor.h,'p:',p.h,'childIndex',p.childIndex())
            vc.add_to_bare_list(data,'non-bare existing')
        else:
            vc.add_to_work_list(data,'non-bare')
    elif od.p == od.anchor:
        if trace and verbose: g.trace(
            '***** existing organizer: do not move:',od.h)
    else:
        ### This can be pre-computed?
        bare_list = [p for parent,p,n in vc.global_bare_organizer_node_list]
        if od.p in bare_list:
            # Already on the bare list: mark it moved instead.
            if trace and verbose: g.trace(
                '**** duplicate 2: setting moved bit.',od.h)
            od.moved = True
        else:
            # A bare organizer node: a child of an *ordinary* node.
            anchor = p.parent()
            n = vc.anchor_incr(anchor,p) + p.childIndex()
            data = anchor,od.p,n
            vc.add_to_bare_list(data,'bare')
#@+node:ekr.20150312225028.70: *7* vc.add_to_bare_list
def add_to_bare_list(self,data,tag):
    '''Append data to the bare organizer list unless its position is already present.'''
    trace = False # and not g.unitTesting
    vc = self
    anchor,p,n = data
    # Prevent duplicates: ignore any entry whose position already appears.
    for entry in vc.global_bare_organizer_node_list:
        if entry[1] == p:
            if trace: g.trace('ignore duplicate',
                'n:',n,anchor.h,'==>',p.h)
            return
    vc.global_bare_organizer_node_list.append(data)
    if trace:
        g.trace('=====',tag,'n:',n,anchor.h,'==>',p.h)
#@+node:ekr.20150312225028.71: *7* vc.add_to_pending
def add_to_pending(self,active,p):
    '''Append an (active, copy-of-p) pair to the pending list.'''
    trace = False # and not g.unitTesting
    vc = self
    if trace: g.trace(active.p.h,'==>',p.h)
    entry = (active, p.copy())
    vc.pending.append(entry)
#@+node:ekr.20150312225028.72: *7* vc.add_to_work_list
def add_to_work_list(self,data,tag):
    '''Append data (an (active, p) pair) to the work list, with tracing.'''
    trace = False # and not g.unitTesting
    vc = self
    vc.work_list.append(data)
    if trace:
        active_node, target = data
        g.trace('=====',tag,active_node.h,'==>',target.h)
#@+node:ekr.20150312225028.73: *7* vc.anchor_decr
def anchor_decr(self,anchor,p): # p is only for traces.
    '''
    Decrement the anchor dict for the given anchor node.
    Return the *previous* value.
    '''
    trace = False # and not g.unitTesting
    vc = self
    table = vc.anchor_offset_d
    prev = table.get(anchor,0)
    table[anchor] = prev - 1
    if trace: g.trace(prev-1,anchor.h,'==>',p.h)
    return prev
#@+node:ekr.20150312225028.74: *7* vc.anchor_incr
def anchor_incr(self,anchor,p): # p is only for traces.
    '''
    Increment the anchor dict for the given anchor node.
    Return the *previous* value.
    '''
    trace = False # and not g.unitTesting
    vc = self
    table = vc.anchor_offset_d
    prev = table.get(anchor,0)
    table[anchor] = prev + 1
    if trace: g.trace(prev+1,anchor.h,'==>',p.h)
    return prev
#@+node:ekr.20150312225028.75: *7* vc.clear_pending
def clear_pending(self,active,p):
    '''Clear the appropriate entries from the pending list.'''
    trace = False # and not g.unitTesting
    vc = self
    if trace: g.trace('===== clear pending',len(vc.pending))
    # Disabled alternative: flush pending entries to the work list first.
    if False: # active and active.parent_od:
        for data in vc.pending:
            data = active.parent_od.p,data[1]
            vc.add_to_work_list(data,'clear-pending-to-active')
    vc.pending = []
#@+node:ekr.20150312225028.76: *7* vc.enter_organizers
def enter_organizers(self,od,p):
    '''Enter all organizers whose anchors are p.'''
    vc = self
    # Collect od and all its ancestors, then enter them outermost-first.
    chain = []
    while od:
        chain.append(od)
        od = od.parent_od
    for od2 in reversed(chain):
        vc.add_organizer_node(od2,p)
#@+node:ekr.20150312225028.77: *7* vc.find_organizer
def find_organizer(self,parent,p):
    '''Return the organizer that organizes p, if any.'''
    trace = False # and not g.unitTesting
    vc = self
    # Organizers are looked up by their anchor (here, the parent).
    for od in vc.anchors_d.get(parent,[]):
        if p in od.organized_nodes:
            if trace: g.trace('found:',od.h,'for',p.h)
            return od
    return None
#@+node:ekr.20150312225028.78: *7* vc.terminate_organizers
def terminate_organizers(self,active,p):
    '''Terminate all organizers whose anchors are not ancestors of p.'''
    trace = False # and not g.unitTesting
    od = active
    while od:
        anchor = od.anchor
        # Stop at the anchor that is p itself or no longer encloses p.
        if anchor == p or not anchor.isAncestorOf(p):
            break
        if not od.closed:
            if trace: g.trace('===== closing',od.h)
            od.closed = True
        od = od.parent_od
#@+node:ekr.20150312225028.79: *7* vc.terminate_all_open_organizers
def terminate_all_open_organizers(self):
    '''Terminate all open organizers.'''
    # NOTE(review): the entire body is disabled by 'if 0', so this method
    # is currently a no-op kept for reference; the trace flag has no effect.
    trace = True # and not g.unitTesting
    if 0: ###
        g.trace()
        for od in self.all_ods:
            if od.opened and not od.closed:
                if trace: g.trace('===== closing',od.h)
                od.closed = True
#@+node:ekr.20150312225028.80: *6* vc.move_nodes & helpers
def move_nodes(self):
    '''Move nodes to their final location and delete the temp node.'''
    trace = False # and not g.unitTesting
    vc = self
    # Order matters: organizer moves, then bare organizers, then cleanup.
    vc.move_nodes_to_organizers(trace)
    vc.move_bare_organizers(trace)
    vc.temp_node.doDelete()
#@+node:ekr.20150312225028.81: *7* vc.move_nodes_to_organizers
def move_nodes_to_organizers(self,trace):
    '''Move all nodes in the work_list.'''
    # Each work_list entry is a (parent,p) pair: p is to be moved or copied
    # under parent.  NOTE(review): the trace *parameter* is immediately
    # shadowed by the local assignment below.
    trace = False # and not g.unitTesting
    trace_dict = False
    trace_moves = False
    trace_deletes = False
    vc = self
    if trace: # A highly useful trace!
        g.trace('\n\nunsorted_list...\n%s' % (
            '\n'.join(['%40s ==> %s' % (parent.h,p.h)
                for parent,p in vc.work_list])))
    # Create a dictionary of each organizers children.
    d = {}
    for parent,p in vc.work_list:
        # This key must remain stable if parent moves.
        key = parent
        aList = d.get(key,[])
        aList.append(p)
        # g.trace(key,[z.h for z in aList])
        d[key] = aList
    if trace and trace_dict:
        # g.trace('d...',sorted([z.h for z in d.keys()]))
        g.trace('d{}...')
        for key in sorted(d.keys()):
            aList = [z.h for z in d.get(key)]
            g.trace('%s %-20s %s' % (id(key),key.h,vc.dump_list(aList,indent=29)))
    # Move *copies* of non-organizer nodes to each organizer.
    organizers = list(d.keys())
    existing_organizers = [z.p.copy() for z in vc.existing_ods]
    moved_existing_organizers = {} # Keys are vnodes, values are positions.
    for parent in organizers:
        aList = d.get(parent,[])
        if trace and trace_moves:
            g.trace('===== moving/copying:',parent.h,
                'with %s children:' % (len(aList)),
                '\n  '+'\n  '.join([z.h for z in aList]))
        for p in aList:
            if p in existing_organizers:
                # Copy an existing organizer tree under parent and remember
                # where the copy went, keyed by vnode.
                if trace and trace_moves:
                    g.trace('copying existing organizer:',p.h)
                    g.trace('children:',
                    '\n  '+'\n  '.join([z.h for z in p.children()]))
                copy = vc.copy_tree_to_last_child_of(p,parent)
                old = moved_existing_organizers.get(p.v)
                if old and trace_moves:
                    g.trace('*********** overwrite',p.h)
                moved_existing_organizers[p.v] = copy
            elif p in organizers:
                # Move (not copy) one organizer under another, temporarily
                # removing its own entry from d so the key stays stable.
                if trace and trace_moves:
                    g.trace('moving organizer:',p.h)
                aList = d.get(p)
                if aList:
                    if trace and trace_moves: g.trace('**** relocating',
                        p.h,'children:',
                        '\n  '+'\n  '.join([z.h for z in p.children()]))
                    del d[p]
                p.moveToLastChildOf(parent)
                if aList:
                    d[p] = aList
            else:
                # An ordinary node: copy it under the (possibly relocated) parent.
                parent2 = moved_existing_organizers.get(parent.v)
                if parent2:
                    if trace and trace_moves:
                        g.trace('***** copying to relocated parent:',p.h)
                    vc.copy_tree_to_last_child_of(p,parent2)
                else:
                    if trace and trace_moves: g.trace('copying:',p.h)
                    vc.copy_tree_to_last_child_of(p,parent)
    # Finally, delete all the non-organizer nodes, in reverse outline order.
    def sort_key(od):
        parent,p = od
        return p.sort_key(p)
    sorted_list = sorted(vc.work_list,key=sort_key)
    if trace and trace_deletes:
        g.trace('===== deleting nodes in reverse outline order...')
    for parent,p in reversed(sorted_list):
        if p.v in moved_existing_organizers:
            if trace and trace_deletes:
                g.trace('deleting moved existing organizer:',p.h)
            p.doDelete()
        elif p not in organizers:
            if trace and trace_deletes:
                g.trace('deleting non-organizer:',p.h)
            p.doDelete()
#@+node:ekr.20150312225028.82: *7* vc.move_bare_organizers
def move_bare_organizers(self,trace):
    '''Move all nodes in global_bare_organizer_node_list.'''
    # Each entry is (parent,p,n): move p to be the n-th child of parent.
    # NOTE(review): the trace *parameter* is shadowed below, so the
    # trace_data/trace_move flags (True) currently have no effect.
    trace = False # and not g.unitTesting
    trace_data = True
    trace_move = True
    vc = self
    # For each parent, sort nodes on n.
    d = {} # Keys are vnodes, values are lists of (parent,p,n) tuples.
    existing_organizers = [od.p for od in vc.existing_ods]
    if trace: g.trace('ignoring existing organizers:',
        [p.h for p in existing_organizers])
    for parent,p,n in vc.global_bare_organizer_node_list:
        if p not in existing_organizers:
            key = parent.v
            aList = d.get(key,[])
            if (parent,p,n) not in aList:
                aList.append((parent,p,n),)
                d[key] = aList
    # For each parent, add nodes in childIndex order.
    # NOTE(review): entries are (parent,p,n) but key_func sorts on obj[0]
    # (the parent position), not on n — confirm the intended sort key.
    def key_func(obj):
        return obj[0]
    for key in d.keys():
        aList = d.get(key)
        for data in sorted(aList,key=key_func):
            parent,p,n = data
            n2 = parent.numberOfChildren()
            if trace and trace_data:
                g.trace(n,parent.h,'==>',p.h)
            if trace and trace_move: g.trace(
                'move: %-20s:' % (p.h),
                'to child: %2s' % (n),
                'of: %-20s' % (parent.h),
                'with:',n2,'children')
            p.moveToNthChildOf(parent,n)
#@+node:ekr.20150312225028.83: *7* vc.copy_tree_to_last_child_of
def copy_tree_to_last_child_of(self,p,parent):
    '''Copy p's tree to the last child of parent. Return the new root.'''
    # Relies on a module-level 'import copy' not visible in this chunk —
    # TODO confirm.
    vc = self
    assert p != parent,p
        # A failed assert leads to unbounded recursion.
    # print('copy_tree_to_last_child_of',p.h,parent.h)
    root = parent.insertAsLastChild()
    root.b,root.h = p.b,p.h
    # Deep-copy the uA dict so the copy shares no mutable state with p.
    root.v.u = copy.deepcopy(p.v.u)
    for child in p.children():
        vc.copy_tree_to_last_child_of(child,root)
    return root
#@+node:ekr.20150312225028.84: *5* vc.Helpers
#@+node:ekr.20150312225028.85: *6* vc.at_auto_view_body and match_at_auto_body
def at_auto_view_body(self,p):
    '''Return the body text for the @auto-view node for p.'''
    # Note: the unl of p relative to p is simply p.h,
    # so it is pointless to add that to the @auto-view node.
    return 'gnx: %s\n' % p.v.gnx

def match_at_auto_body(self,p,auto_view):
    '''Return True if p.b is exactly the gnx line expected for auto_view.'''
    expected = 'gnx: %s\n' % auto_view.v.gnx
    if 0: g.trace(p.b == expected,
        g.shortFileName(p.h),auto_view.v.gnx,p.b.strip())
    return p.b == expected
#@+node:ekr.20150312225028.86: *6* vc.clean_nodes (not used)
def clean_nodes(self):
    '''Delete @auto-view nodes with no corresponding @auto nodes.'''
    vc = self
    c = vc.c
    views = vc.has_at_views_node()
    if not views:
        return
    # Remember the gnx of all @auto nodes.
    d = {}
    for p in c.all_unique_positions():
        if vc.is_at_auto_node(p):
            d[p.v.gnx] = True
    # Remember all unused @auto-view nodes.
    delete = []
    for child in views.children():
        # The body is written by at_auto_view_body as 'gnx: <gnx>\n'.
        # The old code sliced a *list* of lines (g.splitlines result) and
        # then called .strip() on it, raising AttributeError; it also used
        # len('gnx') instead of len('gnx:').
        gnx = child.b[len('gnx:'):].strip()
        if gnx not in d:
            g.trace(child.h,gnx)
            delete.append(child.copy())
    for p in reversed(delete):
        p.doDelete()
    c.selectPosition(views)
#@+node:ekr.20150312225028.87: *6* vc.comments...
#@+node:ekr.20150312225028.88: *7* vc.comment_delims
def comment_delims(self,p):
    '''Return the comment delimiters in effect at p, an @auto node.'''
    vc = self
    c = vc.c
    d = g.get_directives_dict(p)
    lang = d.get('language') or c.target_language
    # g.set_language returns (language,single,start,end); drop the first.
    unused_language, single, start, end = g.set_language(lang, 0)
    return single, start, end
#@+node:ekr.20150312225028.89: *7* vc.delete_leading_comments
def delete_leading_comments(self,delims,p):
    '''
    Scan for leading comments from p and return them.
    At present, this only works for single-line comments.
    '''
    single,start,end = delims
    if not single:
        return None
    lines = g.splitLines(p.b)
    comments = []
    for line in lines:
        if not line.strip().startswith(single):
            break
        comments.append(line)
    if not comments:
        return None
    # Remove the comment lines from p.b and return them.
    p.b = ''.join(lines[len(comments):])
    return ''.join(comments)
#@+node:ekr.20150312225028.90: *7* vc.is_comment_node
def is_comment_node(self,p,root,delims=None):
    '''Return True if p.b contains nothing but comments or blank lines.'''
    # delims: optional (single,start,end) delimiters; computed from root
    # (an @auto node) when not supplied.
    vc = self
    if not delims:
        delims = vc.comment_delims(root)
    # pylint: disable=unpacking-non-sequence
    single,start,end = delims
    assert single or start and end,'bad delims: %r %r %r' % (single,start,end)
    if single:
        # Single-line comment language: every non-blank line must be a
        # comment or a Leo directive.
        for s in g.splitLines(p.b):
            s = s.strip()
            if s and not s.startswith(single) and not g.isDirective(s):
                return False
        return True
    else:
        # Block-comment language: track open/close state line by line.
        def check_comment(s):
            # Return (done,in_comment) after scanning s for the closer.
            # done means non-comment text followed the closing delimiter.
            done,in_comment = False,True
            i = s.find(end)
            if i > -1:
                tail = s[i+len(end):].strip()
                if tail: done = True
                else: in_comment = False
            return done,in_comment
        
        done,in_comment = False,False
        for s in g.splitLines(p.b):
            s = s.strip()
            if not s:
                pass
            elif in_comment:
                done,in_comment = check_comment(s)
            elif g.isDirective(s):
                pass
            elif s.startswith(start):
                done,in_comment = check_comment(s[len(start):])
            else:
                # g.trace('fail 1: %r %r %r...\n%s' % (single,start,end,s)
                return False
            if done:
                # Text followed the comment closer: not a pure comment node.
                return False
        # All lines pass.
        return True
#@+node:ekr.20150312225028.91: *7* vc.is_comment_organizer_node
# def is_comment_organizer_node(self,p,root):
    # '''
    # Return True if p is an organizer node in the given @auto tree.
    # '''
    # return p.hasChildren() and vc.is_comment_node(p,root)
#@+node:ekr.20150312225028.92: *7* vc.post_move_comments
def post_move_comments(self,root):
    '''Move comments from the start of nodes to their parent organizer node.'''
    # Removed the unused local 'c = vc.c'.
    vc = self
    delims = vc.comment_delims(root)
    for p in root.subtree():
        # Only empty organizer nodes (with children) receive comments.
        if p.hasChildren() and not p.b:
            s = vc.delete_leading_comments(delims,p.firstChild())
            if s:
                p.b = s
#@+node:ekr.20150312225028.93: *7* vc.pre_move_comments
def pre_move_comments(self,root):
    '''
    Move comments from comment nodes to the next node.
    This must be done before any other processing.
    '''
    vc = self
    c = vc.c
    delims = vc.comment_delims(root)
    aList = []
    for p in root.subtree():
        if p.hasNext() and vc.is_comment_node(p,root,delims=delims):
            aList.append(p.copy())
            next_p = p.next() # Renamed: don't shadow the builtin next().
            if p.b: next_p.b = p.b + next_p.b
    # g.trace([z.h for z in aList])
    c.deletePositionsInList(aList)
        # This sets c.changed.
#@+node:ekr.20150312225028.94: *6* vc.find...
# The find commands create the node if not found.
#@+node:ekr.20150312225028.95: *7* vc.find_absolute_unl_node
def find_absolute_unl_node(self,unl,priority_header=False):
    '''Return a node matching the given absolute unl.
    If priority_header == True and the node is not found, it will return the longest matching UNL starting from the tail
    '''
    import re
    # Matches a trailing ':childIndex,gnxIndex' suffix on a unl component.
    pos_pattern = re.compile(r':(\d+),?(\d+)?$')
    vc = self
    aList = unl.split('-->')
    if aList:
        first,rest = aList[0],'-->'.join(aList[1:])
        count = 0
        pos = re.findall(pos_pattern,first)
        nth_sib,pos = pos[0] if pos else (0,0)
        pos = int(pos) if pos else 0
        nth_sib = int(nth_sib)
        # Strip the position suffix and unescape '-->' in the headline.
        first = re.sub(pos_pattern,"",first).replace('--%3E','-->')
        for parent in vc.c.rootPosition().self_and_siblings():
            if parent.h.strip() == first.strip():
                # pos selects among top-level nodes with identical headlines.
                if pos == count:
                    if rest:
                        return vc.find_position_for_relative_unl(parent,rest,priority_header=priority_header)
                    else:
                        return parent
                count = count+1
        #Here we could find and return the nth_sib if an exact header match was not found
    return None
#@+node:ekr.20150312225028.96: *7* vc.find_at_auto_view_node & helper
def find_at_auto_view_node (self,root):
    '''
    Return the @auto-view node for root, an @auto node.
    Create the node if it does not exist.
    '''
    vc = self
    views = vc.find_at_views_node()
    p = vc.has_at_auto_view_node(root)
    if p:
        return p
    # Create the node as the last child of the @views node.
    p = views.insertAsLastChild()
    p.h = '@auto-view:' + root.h[len('@auto'):].strip()
    p.b = vc.at_auto_view_body(root)
    return p
#@+node:ekr.20150312225028.97: *7* vc.find_clones_node
def find_at_clones_node(self,root):
    '''
    Find the @clones node for root, an @auto node.
    Create the @clones node if it does not exist.
    '''
    vc = self
    h = '@clones'
    auto_view = vc.find_at_auto_view_node(root)
    p = g.findNodeInTree(vc.c,auto_view,h)
    if p:
        return p
    p = auto_view.insertAsLastChild()
    p.h = h
    return p
#@+node:ekr.20150312225028.98: *7* vc.find_at_headlines_node
def find_at_headlines_node(self,root):
    '''
    Find the @headlines node for root, an @auto node.
    Create the @headlines node if it does not exist.
    '''
    vc = self
    h = '@headlines'
    auto_view = vc.find_at_auto_view_node(root)
    p = g.findNodeInTree(vc.c,auto_view,h)
    if p:
        return p
    p = auto_view.insertAsLastChild()
    p.h = h
    return p
#@+node:ekr.20150312225028.99: *7* vc.find_gnx_node
def find_gnx_node(self,gnx):
    '''Return the first position having the given gnx.'''
    # This is part of the read logic, so newly-imported
    # nodes will never have the given gnx.
    positions = self.c.all_unique_positions()
    return next((p for p in positions if p.v.gnx == gnx), None)
#@+node:ekr.20150312225028.100: *7* vc.find_organizers_node
def find_at_organizers_node(self,root):
    '''
    Find the @organizers node for root, an @auto node.
    Create the @organizers node if it does not exist.
    '''
    vc = self
    h = '@organizers'
    auto_view = vc.find_at_auto_view_node(root)
    p = g.findNodeInTree(vc.c,auto_view,h)
    if p:
        return p
    p = auto_view.insertAsLastChild()
    p.h = h
    return p
#@+node:ekr.20150312225028.101: *7* vc.find_position_for_relative_unl
def find_position_for_relative_unl(self,parent,unl,priority_header=False):
    '''
    Return the node in parent's subtree matching the given unl.
    The unl is relative to the parent position.
    If priority_header == True and the node is not found, it will return the longest matching UNL starting from the tail
    '''
    # This is called from finish_create_organizers & compute_all_organized_positions.
    trace = False # and not g.unitTesting
    trace_loop = True
    trace_success = False
    vc = self
    if not unl:
        if trace and trace_success:
            g.trace('return parent for empty unl:',parent.h)
        return parent
    # The new, simpler way: drop components of the unl automatically.
    drop,p = [],parent # for debugging.
    # if trace: g.trace('p:',p.h,'unl:',unl)
    import re
    # Matches a trailing ':childIndex,gnxIndex' suffix on a unl component.
    pos_pattern = re.compile(r':(\d+),?(\d+)?$')
    for s in unl.split('-->'):
        found = False # The last part must match.
        if 1:
            # Create the list of children on the fly.
            aList = vc.headlines_dict.get(p.v)
            if aList is None:
                aList = [z.h for z in p.children()]
                vc.headlines_dict[p.v] = aList
            try:
                pos = re.findall(pos_pattern,s)
                nth_sib,pos = pos[0] if pos else (0,0)
                pos = int(pos) if pos else 0
                nth_sib = int(nth_sib)
                # Strip the position suffix and unescape '-->'.
                s = re.sub(pos_pattern,"",s).replace('--%3E','-->')
                indices = [i for i, x in enumerate(aList) if x == s]
                if len(indices)>pos:
                    #First we try the nth node with same header
                    n = indices[pos]
                    p = p.nthChild(n)
                    found = True
                elif len(indices)>0:
                    #Then we try any node with same header
                    n = indices[-1]
                    p = p.nthChild(n)
                    found = True
                elif not priority_header:
                    #Then we go for the child index if return_pos is true
                    if len(aList)>nth_sib:
                        n = nth_sib
                    else:
                        n = len(aList)-1
                    if n>-1:
                        p = p.nthChild(n)
                    else:
                        g.es('Partial UNL match: Referenced level is higher than '+str(p.level()))
                    found = True
                if trace and trace_loop: g.trace('match:',s)
            except ValueError: # s not in aList.
                if trace and trace_loop: g.trace('drop:',s)
                drop.append(s)
        else: # old code.
            for child in p.children():
                if child.h == s:
                    p = child
                    found = True
                    if trace and trace_loop: g.trace('match:',s)
                    break
                # elif trace and trace_loop: g.trace('no match:',child.h)
            else:
                if trace and trace_loop: g.trace('drop:',s)
                drop.append(s)
    if not found and priority_header:
        # Fall back: search the whole outline for the longest tail match.
        aList = []
        for p in vc.c.all_unique_positions():
            if p.h.replace('--%3E','-->') in unl:
                aList.append((p.copy(),p.get_UNL(False,False,True)))
        unl_list = [re.sub(pos_pattern,"",x).replace('--%3E','-->') for x in unl.split('-->')]
        for iter_unl in aList:
            # NOTE(review): maxcount is reset for every candidate, so any
            # count > 0 wins and later candidates overwrite p; the 'longest
            # match' comparison never accumulates — confirm intent.
            maxcount = 0
            count = 0
            compare_list = unl_list[:]
            for header in reversed(iter_unl[1].split('-->')):
                if re.sub(pos_pattern,"",header).replace('--%3E','-->') == compare_list[-1]:
                    count = count+1
                    compare_list.pop(-1)
                else:
                    break
            if count > maxcount:
                p = iter_unl[0]
                found = True
    if found:
        if trace and trace_success:
            g.trace('found unl:',unl,'parent:',p.h,'drop',drop)
    else:
        if trace: g.trace('===== unl not found:',unl,'parent:',p.h,'drop',drop)
    return p if found else None
#@+node:ekr.20150312225028.102: *7* vc.find_representative_node
def find_representative_node (self,root,target):
    '''
    root is an @auto node. target is a clones node within root's tree.
    Return a node *outside* of root's tree that is cloned to target,
    preferring nodes outside any @<file> tree.
    Never return any node in any @views or @view tree.
    '''
    trace = False and not g.unitTesting
    assert target
    assert root
    vc = self

    def scan(skip_at_file_trees):
        # One outline traversal, skipping @view trees and either
        # all @<file> trees (pass 1) or just root's tree (pass 2).
        p = vc.c.rootPosition()
        while p:
            if p.h.startswith('@view'):
                p.moveToNodeAfterTree()
            elif skip_at_file_trees and p.isAnyAtFileNode():
                p.moveToNodeAfterTree()
            elif not skip_at_file_trees and p == root:
                p.moveToNodeAfterTree()
            elif p.v == target.v:
                return p
            else:
                p.moveToThreadNext()
        return None

    # Pass 1: accept only nodes outside any @file tree.
    p = scan(True)
    if p:
        if trace: g.trace('success 1:',p,p.parent())
        return p
    # Pass 2: accept any node outside the root tree.
    p = scan(False)
    if p:
        if trace: g.trace('success 2:',p,p.parent())
        return p
    g.trace('no representative node for:',target,'parent:',target.parent())
    return None
#@+node:ekr.20150312225028.103: *7* vc.find_views_node
def find_at_views_node(self):
    '''
    Find the first @views node in the outline.
    If it does not exist, create it as the *last* top-level node,
    so that no existing positions become invalid.
    '''
    vc = self
    c = vc.c
    p = g.findNodeAnywhere(c,'@views')
    if p:
        return p
    # Walk to the last top-level node and insert after it.
    last = c.rootPosition()
    while last.hasNext():
        last.moveToNext()
    p = last.insertAfter()
    p.h = '@views'
    return p
#@+node:ekr.20150312225028.104: *6* vc.has...
# The has commands return None if the node does not exist.
#@+node:ekr.20150312225028.105: *7* vc.has_at_auto_view_node
def has_at_auto_view_node(self,root):
    '''
    Return the @auto-view node corresponding to root, an @root node.
    Return None if no such node exists.
    '''
    vc = self
    c = vc.c
    assert vc.is_at_auto_node(root) or vc.is_at_file_node(root),root
    views = g.findNodeAnywhere(c,'@views')
    if not views:
        return None
    # Find a direct child of views with matching headline and body.
    for child in views.children():
        if vc.match_at_auto_body(child,root):
            return child
    return None
#@+node:ekr.20150312225028.106: *7* vc.has_clones_node
def has_at_clones_node(self,root):
    '''
    Find the @clones node for an @auto node with the given unl.
    Return None if it does not exist.
    '''
    vc = self
    auto_view = vc.has_at_auto_view_node(root)
    if not auto_view:
        return auto_view
    return g.findNodeInTree(vc.c,auto_view,'@clones')
#@+node:ekr.20150312225028.107: *7* vc.has_at_headlines_node
def has_at_headlines_node(self,root):
    '''
    Find the @headlines node for an @auto node with the given unl.
    Return None if it does not exist.
    '''
    # The old docstring said '@clones' (copy-paste); the code finds @headlines.
    vc = self
    p = vc.has_at_auto_view_node(root)
    return p and g.findNodeInTree(vc.c,p,'@headlines')
#@+node:ekr.20150312225028.108: *7* vc.has_organizers_node
def has_at_organizers_node(self,root):
    '''
    Find the @organizers node for root, an @auto node.
    Return None if it does not exist.
    '''
    vc = self
    auto_view = vc.has_at_auto_view_node(root)
    if not auto_view:
        return auto_view
    return g.findNodeInTree(vc.c,auto_view,'@organizers')
#@+node:ekr.20150312225028.109: *7* vc.has_views_node
def has_at_views_node(self):
    '''Return the @views node, or None if it does not exist.'''
    return g.findNodeAnywhere(self.c,'@views')
#@+node:ekr.20150312225028.110: *6* vc.is...
#@+node:ekr.20150312225028.111: *7* vc.is_at_auto_node
def is_at_auto_node(self,p):
    '''Return True if p is an @auto node (but not @auto-rst etc.).'''
    h = p.h
    # Require the bare word '@auto': reject '@auto-rst' and friends.
    return g.match_word(h,0,'@auto') and not g.match(h,0,'@auto-')

def is_at_file_node(self,p):
    '''Return True if p is an @file node.'''
    return g.match_word(p.h,0,'@file')
#@+node:ekr.20150312225028.112: *7* vc.is_cloned_outside_parent_tree
def is_cloned_outside_parent_tree(self,p):
    '''Return True if a clone of p exists outside the tree of p.parent().'''
    # set() already de-duplicates; the old list(set(...)) materialized a
    # throwaway list just to take its length.
    return len(set(p.v.parents)) > 1
#@+node:ekr.20150312225028.113: *7* vc.is_organizer_node
def is_organizer_node(self,p,root):
    '''
    Return True if p is an organizer node in the given @auto tree.
    '''
    vc = self
    # Organizers have children and contain only comments/blank lines.
    if not p.hasChildren():
        return False
    return vc.is_comment_node(p,root)

#@+node:ekr.20150312225028.114: *6* vc.testing...
#@+node:ekr.20150312225028.115: *7* vc.compare_test_trees
def compare_test_trees(self,root1,root2):
    '''
    Compare the subtrees whose roots are given.
    This is called only from unit tests.
    '''
    vc = self
    w1 = vc.trial_write(root1)
    w2 = vc.trial_write(root2)
    if w1 == w2:
        return True
    # Mismatch: report the first headline that differs.
    g.trace('Compare:',root1.h,root2.h)
    p2 = root2.copy().moveToThreadNext()
    for p1 in root1.subtree():
        if p1.h == p2.h:
            g.trace('Match:',p1.h)
        else:
            g.trace('Fail: %s != %s' % (p1.h,p2.h))
            break
        p2.moveToThreadNext()
    return False
#@+node:ekr.20150312225028.116: *7* vc.compare_trial_writes
def compare_trial_writes(self,s1,s2):
    '''
    Compare the two strings, the results of trial writes.
    Stop the comparison after the first mismatch.
    '''
    trace_matches = False
    full_compare = False
    lines1,lines2 = g.splitLines(s1),g.splitLines(s2)
    n1,n2 = len(lines1),len(lines2)
    i = 0
    while i < n1 and i < n2:
        line1 = lines1[i].rstrip()
        line2 = lines2[i].rstrip()
        i += 1
        if line1 == line2:
            if trace_matches: g.trace('Match:',line1)
        else:
            g.trace('Fail:  %s != %s' % (line1,line2))
            if not full_compare: return
    # Report any surplus line on either side.
    if i < n1:
        g.trace('Extra line 1:',lines1[i])
    if i < n2:
        g.trace('Extra line 2:',lines2[i])
#@+node:ekr.20150312225028.117: *7* vc.dump_list
def dump_list(self,aList,indent=4):
    '''Dump a list, one item per line, each indented by `indent` spaces.'''
    sep = '\n' + ' ' * indent
    return sep + sep.join(sorted(aList))
#@+node:ekr.20150312225028.118: *7* vc.trial_write
def trial_write(self,root):
    '''
    Return a trial write of outline whose root is given.
    
    **Important**: the @auto import and write code end all nodes with
    newlines. Because no imported nodes are empty, the code below is
    *exactly* equivalent to the @auto write code as far as trailing
    newlines are concerned. Furthermore, we can treat Leo directives as
    ordinary text here.
    '''
    vc = self
    # Only the first branch is live; the others are kept for reference.
    if 1:
        # Do a full trial write, exactly as will be done later.
        at = vc.c.atFileCommands
        ok = at.writeOneAtAutoNode(root,
            toString=True,force=True,trialWrite=True)
        if ok:
            return at.stringOutput
        else:
            g.trace('===== can not happen')
            return ''
    elif 1:
        # Concatenate all body text.  Close, but not exact.
        return ''.join([p.b for p in root.self_and_subtree()])
    else:
        # Compare headlines, ignoring nodes without body text and comment nodes.
        # This was handy during early development.
        return '\n'.join([p.h for p in root.self_and_subtree()
            if p.b and not p.h.startswith('#')])
#@+node:ekr.20150312225028.119: *6* vc.unls...
#@+node:ekr.20150312225028.120: *7* vc.drop_all_organizers_in_unl
def drop_all_organizers_in_unl(self,organizer_unls,unl):
    '''Drop all organizer unl's in unl, recreating the imported unl.'''
    vc = self
    # Process deepest (most '-->') unls first; reversed(sorted(...)) is
    # kept verbatim so tie ordering matches the original exactly.
    ordered = reversed(sorted(organizer_unls, key=lambda z: z.count('-->')))
    for s in ordered:
        if unl.startswith(s):
            unl = vc.drop_unl_tail(s) + unl[len(s):]
    return unl[3:] if unl.startswith('-->') else unl
#@+node:ekr.20150312225028.121: *7* vc.drop_unl_tail & vc.drop_unl_parent
def drop_unl_tail(self,unl):
    '''Drop the last part of the unl.'''
    parts = unl.split('-->')
    return '-->'.join(parts[:-1])

def drop_unl_parent(self,unl):
    '''Drop the penultimate part of the unl.'''
    parts = unl.split('-->')
    return '-->'.join(parts[:-2] + parts[-1:])
#@+node:ekr.20150312225028.122: *7* vc.get_at_organizer_unls
def get_at_organizer_unls(self,p):
    '''Return the unl: lines in an @organizer: node.'''
    prefix = 'unl:'
    return [line[len(prefix):].strip()
        for line in g.splitLines(p.b)
            if line.startswith(prefix)]

#@+node:ekr.20150312225028.123: *7* vc.relative_unl & unl
def relative_unl(self,p,root):
    '''Return the unl of p relative to the root position.'''
    vc = self
    ivar = vc.headline_ivar
    parts = []
    # Collect headlines from p up to (but excluding) root.
    for p2 in p.self_and_parents():
        if p2 == root:
            break
        parts.append(getattr(p2.v,ivar,p2.h))
    parts.reverse()
    return '-->'.join(parts)

def unl(self,p):
    '''Return the unl corresponding to the given position.'''
    vc = self
    parts = [getattr(p2.v,vc.headline_ivar,p2.h)
        for p2 in p.self_and_parents()]
    return '-->'.join(reversed(parts))
#@+node:ekr.20150312225028.124: *7* vc.source_unl
def source_unl(self,organizer_unls,organizer_unl):
    '''Return the unl of the source node for the given organizer_unl.'''
    return self.drop_all_organizers_in_unl(organizer_unls,organizer_unl)
#@+node:ekr.20150312225028.125: *7* vc.unl_tail
def unl_tail(self,unl):
    '''Return the last part of a unl.'''
    # Bug fix: the old code was unl.split('-->')[:-1][0], which returns the
    # *first* component (of all-but-last) and raises IndexError for a
    # one-component unl.  The docstring asks for the last part.
    return unl.split('-->')[-1]
#@+node:ekr.20150312225028.126: *4* vc.Commands
@g.command('view-pack')
def view_pack_command(event):
    '''Pack the outline via the commander's view controller.'''
    c = event.get('c')
    if not c:
        return
    vc = c.viewController
    if vc:
        vc.pack()

@g.command('view-unpack')
def view_unpack_command(event):
    '''Unpack the outline via the commander's view controller.'''
    c = event.get('c')
    if not c:
        return
    vc = c.viewController
    if vc:
        vc.unpack()
        
@g.command('at-file-to-at-auto')
def at_file_to_at_auto_command(event):
    '''Convert the selected @file node to an @auto node.'''
    c = event.get('c')
    if not c:
        return
    vc = c.viewController
    if vc:
        vc.convert_at_file_to_at_auto(c.p)
#@+node:ekr.20140711111623.17795: *4* class ConvertController (leoPersistence.py)
class ConvertController(object):
    '''A class to convert @file trees to @auto trees.'''

    def __init__(self, c, p):
        '''Ctor for ConvertController: c is the commander, p the @file root.'''
        self.c = c
        # NOTE(review): assumes c.persistenceController exists -- confirm.
        self.pd = c.persistenceController
        # Copy p so later tree changes do not move our root.
        self.root = p.copy()
    @others
#@+node:ekr.20140711111623.17796: *5* convert.delete_at_data_nodes
def delete_at_data_nodes(self, root):
    '''Delete all @data nodes pertaining to root.'''
    pd = self.pd
    # Keep deleting until no @data node for root remains.
    p = pd.has_at_data_node(root)
    while p:
        p.doDelete()
        p = pd.has_at_data_node(root)
#@+node:ekr.20140711111623.17797: *5* convert.import_from_string
def import_from_string(self, s):
    '''Import from s into a temp outline.'''
    cc = self # (ConvertController)
    c, root = cc.c, cc.root
    language = g.scanForAtLanguage(c, root)
    ext = '.' + g.app.language_extension_dict.get(language)
    scanner = g.app.scanner_for_ext(c, ext)
    p = root.insertAfter()
    ok = scanner(atAuto=True, c=c, parent=p, s=s)
    # On failure, disable the new node by doubling the @.
    p.h = root.h.replace('@file', '@auto' if ok else '@@auto')
    return ok, p
#@+node:ekr.20140711111623.17798: *5* convert.run
def run(self):
    '''Convert an @file tree to @auto tree.'''
    # Timing instrumentation brackets every phase; trace is deliberately on.
    trace = True and not g.unitTesting
    trace_s = False
    cc = self
    c = cc.c
    root, pd = cc.root, c.persistenceController
    # set the expected imported headline for all vnodes.
    t1 = time.time()
    cc.set_expected_imported_headlines(root)
    t2 = time.time()
    # Delete all previous @data nodes for this tree.
    cc.delete_at_data_nodes(root)
    t3 = time.time()
    # Ensure that all nodes of the tree are regularized.
    ok = pd.prepass(root)
    t4 = time.time()
    if not ok:
        g.es_print('Can not convert', root.h, color='red')
        if trace: g.trace(
            '\n  set_expected_imported_headlines: %4.2f sec' % (t2 - t1),
            # '\n  delete_at_data_nodes:          %4.2f sec' % (t3-t2),
            '\n  prepass:                         %4.2f sec' % (t4 - t3),
            '\n  total:                           %4.2f sec' % (t4 - t1))
        return
    # Create the appropriate @data node.
    at_auto_view = pd.update_before_write_foreign_file(root)
    t5 = time.time()
    # Write the @file node as if it were an @auto node.
    s = cc.strip_sentinels()
    t6 = time.time()
    if trace and trace_s:
        g.trace('source file...\n', s)
    # Import the @auto string.
    ok, p = cc.import_from_string(s)
    t7 = time.time()
    if ok:
        # Change at_auto_view.b so it matches p.gnx.
        at_auto_view.b = pd.at_data_body(p)
        # Recreate the organizer nodes, headlines, etc.
        pd.update_after_read_foreign_file(p)
        t8 = time.time()
        # if not ok:
            # p.h = '@@' + p.h
            # g.trace('restoring original @auto file')
            # ok,p = cc.import_from_string(s)
            # if ok:
                # p.h = '@@' + p.h + ' (restored)'
                # if p.next():
                    # p.moveAfter(p.next())
        t9 = time.time()
    else:
        t8 = t9 = time.time()
    if trace: g.trace(
        '\n  set_expected_imported_headlines: %4.2f sec' % (t2 - t1),
        # '\n  delete_at_data_nodes:          %4.2f sec' % (t3-t2),
        '\n  prepass:                         %4.2f sec' % (t4 - t3),
        '\n  update_before_write_foreign_file:%4.2f sec' % (t5 - t4),
        '\n  strip_sentinels:                 %4.2f sec' % (t6 - t5),
        '\n  import_from_string:              %4.2f sec' % (t7 - t6),
        '\n  update_after_read_foreign_file   %4.2f sec' % (t8 - t7),
        '\n  import_from_string (restore)     %4.2f sec' % (t9 - t8),
        '\n  total:                           %4.2f sec' % (t9 - t1))
    # p is the newly imported node even when the import failed (ok False).
    if p:
        c.selectPosition(p)
    c.redraw()
#@+node:ekr.20140711111623.17799: *5* convert.set_expected_imported_headlines
def set_expected_imported_headlines(self, root):
    '''Set v._imported_headline for every vnode.'''
    trace = False and not g.unitTesting
    cc = self
    c = cc.c
    ic = cc.c.importCommands
    language = g.scanForAtLanguage(c, root)
    ext = '.' + g.app.language_extension_dict.get(language)
    aClass = g.app.classDispatchDict.get(ext)
    # NOTE(review): scanner is instantiated *before* the aClass guard below;
    # if aClass is None this line raises TypeError -- confirm intent.
    scanner = aClass(importCommands=ic, atAuto=True)
    # Duplicate the fn logic from ic.createOutline.
    theDir = g.setDefaultDirectory(c, root, importing=True)
    fn = c.os_path_finalize_join(theDir, root.h)
    # NOTE(review): the next line overwrites fn, making the join above dead code.
    fn = root.h.replace('\\', '/')
    junk, fn = g.os_path_split(fn)
    fn, junk = g.os_path_splitext(fn)
    if aClass and hasattr(scanner, 'headlineForNode'):
        for p in root.subtree():
            # Never overwrite an existing expected headline.
            if not hasattr(p.v, '_imported_headline'):
                h = scanner.headlineForNode(fn, p)
                setattr(p.v, '_imported_headline', h)
                if trace and h != p.h:
                    g.trace('==>', h) # p.h,'==>',h
#@+node:ekr.20140711111623.17800: *5* convert.strip_sentinels
def strip_sentinels(self):
    '''Write the file to a string without headlines or sentinels.'''
    trace = False and not g.unitTesting
    at = self.c.atFileCommands
    # Reset the error count so ok reflects only this write.
    at.errors = 0
    at.write(self.root,
        kind='@file',
        nosentinels=True,
        perfectImportFlag=False,
        scriptWrite=False,
        thinFile=True,
        toString=True)
    ok = at.errors == 0
    s = at.stringOutput
    if trace: g.trace('ok:', ok, 's:...\n' + s)
    return s
#@+node:ekr.20140711111623.17794: *4* pd.convert_at_file_to_at_auto
def convert_at_file_to_at_auto(self, root):
    '''Convert root from @file to @auto, warning when root is not @file.'''
    if not root.isAtFileNode():
        g.es_print('not an @file node:', root.h)
        return
    ConvertController(self.c, root).run()
#@+node:ekr.20140131101641.15495: *4* pd.prepass & helper
def prepass(self, root):
    '''Make sure root's tree has no hard-to-handle nodes.'''
    c, pd = self.c, self
    ic = c.importCommands
    ic.tab_width = c.getTabWidth(root)
    language = g.scanForAtLanguage(c, root)
    ext = g.app.language_extension_dict.get(language)
    # NOTE(review): returns None (not False) here; callers test truthiness,
    # so this is benign but inconsistent with the bool returns below.
    if not ext: return
    if not ext.startswith('.'): ext = '.' + ext
    scanner = g.app.scanner_for_ext(c, ext)
    if not scanner:
        g.trace('no scanner for', root.h)
        return True # Pretend all went well.
    # Pass 1: determine the nodes to be inserted.
    ok = True
    # parts_list = []
    for p in root.subtree():
        ok2 = pd.regularize_node(p, scanner)
        ok = ok and ok2 ### (ok2 or parts)
    # Everything below the return is the disabled second pass, kept for reference.
    return ok
        ### if parts: parts_list.append(parts)
    # Pass 2: actually insert the nodes.
    ### This no longer happens.
        # if ok:
            # for parts in reversed(parts_list):
                # p0 = None
                # for part in reversed(parts):
                    # i1, i2, headline, p = part
                    # if p0 is None:
                        # p0 = p
                    # else:
                        # assert p == p0, (p, p0)
                    # s = p.b
                    # g.trace(p.h, '-->', headline)
                    # p2 = p.insertAfter()
                    # p2.b = s[i1: i2]
                    # p2.h = headline
                # p0.doDelete()
        # return ok
#@+node:ekr.20140131101641.15496: *5* pd.regularize_node
def regularize_node(self, p, scanner):
    '''Regularize node p so that it will not cause problems.'''
    # The scanner is a callback returned by g.app.scanner_for_ext;
    # it must accept a c keyword argument.
    ok = scanner(atAuto=True, c=self.c, parent=p, s=p.b)
    if not ok:
        g.es_print('please regularize:', p.h)
    return ok
#@+node:ekr.20150312225028.128: *3* Static type checking...
#@+node:ekr.20150312225028.129: *4*  Deduction stuff
#@+node:ekr.20150312225028.130: *5* DeductionTraverser class
class DeductionTraverser (AstTraverser):

    '''A class to create all Deduction objects by traversing the AST.

    This second tree traversal happens after the scope-resolution pass
    has computed the ultimate Context for all names.
    '''

    # Methods are attached here by Leo's @others directive.
    @others
#@+node:ekr.20150312225028.131: *6*  dt.ctor
def __init__(self,fn):
    '''Ctor for DeductionTraverser: init the base class and traversal flags.'''
    # The base-class ctor calls create_dispatch_table().
    AstTraverser.__init__(self,fn)
    # State flags consulted by the visitors.
    self.in_arg_list = False
    self.in_lhs = False
    self.in_rhs = False
#@+node:ekr.20150312225028.132: *6*  dt.traverse
def traverse (self,s):
    '''Perform all checks on the source in s.'''
    stats = self.u.stats
    start = time.time()
    tree = ast.parse(s, filename=self.fn, mode='exec')
    parsed = time.time()
    stats.parse_time += parsed - start
    self.visit(tree)
    stats.pass1_time += time.time() - parsed
#@+node:ekr.20150312225028.133: *6* dt.Contexts
#@+node:ekr.20150312225028.134: *7* dt.ClassDef
# ClassDef(identifier name, expr* bases, stmt* body, expr* decorator_list)

def do_ClassDef (self,tree):
    '''Visit the class name, then every statement in the class body.'''
    self.visit(tree.name)
    for stmt in tree.body:
        self.visit(stmt)
#@+node:ekr.20150312225028.135: *7* dt.FunctionDef
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,tree):
    '''Visit the function name and body.

    Formal arguments and defaults are deliberately not visited:
    no deductions correspond to them.
    '''
    self.visit(tree.name)
    for stmt in tree.body:
        self.visit(stmt)
#@+node:ekr.20150312225028.136: *7* dt.Module
def do_Module (self,tree):
    '''Visit every top-level statement of the module.'''
    for stmt in tree.body:
        self.visit(stmt)
#@+node:ekr.20150312225028.137: *6* dt.Operands
#@+node:ekr.20150312225028.138: *7* dt.Attribute (rewrite)
def do_Attribute(self,tree):
    '''Format the attribute chain for tree and register it in the symbol table.

    NOTE(review): attic code -- g_format_tree, cx, use_deductions and trace
    are not defined in this chunk; this visitor cannot run as written.
    '''
    name = tree.attr
    
    # Use the *formatter* to traverse tree.value.
    expr = g_format_tree(tree.value)
    s = '%s.%s' % (expr,name)
    
    chain = cx.st.add_chain(tree,s)
    
    if use_deductions and self.in_rhs:
        if trace: g.trace('Adding chain to dependencies',chain)
        # NOTE(review): appends the ast *module*, not this node -- looks
        # like a bug; confirm before resurrecting this code.
        self.dependencies.append((ast,chain),)
    
    self.u.stats.n_attributes += 1
    return s
        
    
#@+node:ekr.20150312225028.139: *7* dt.bool
# Python 2.x only.
def do_bool(self,tree):
    '''Python 2.x only: bool constants yield no deductions.'''
    
#@+node:ekr.20150312225028.140: *7* dt.Bytes
# Python 3.x only.
def do_Bytes(self,tree):
    '''Python 3.x only: bytes constants yield no deductions.'''

#@+node:ekr.20150312225028.141: *7* dt.Call
def do_Call(self,tree):
    '''Visit the callee, all positional and keyword args, and any
    starargs/kwargs (present only on older ASTs).'''
    self.visit(tree.func)
    for arg in tree.args:
        self.visit(arg)
    for kw in tree.keywords:
        self.visit(kw)
    starargs = getattr(tree, 'starargs', None)
    if starargs:
        if self.isiterable(starargs):
            for z in starargs:
                self.visit(z)
        else:
            # Always visit the tree even when it is not iterable.
            self.visit(starargs)
    kwargs = getattr(tree, 'kwargs', None)
    if kwargs:
        if self.isiterable(kwargs):
            for z in kwargs:
                self.visit(z)
        else:
            # Always visit the tree even when it is not iterable.
            self.visit(kwargs)
#@+node:ekr.20150312225028.142: *7* dt.comprehension
def do_comprehension(self,tree):
    '''Visit the target, the iterable, and each if-clause.'''
    self.visit(tree.target)
    self.visit(tree.iter)
    for cond in tree.ifs:
        self.visit(cond)

#@+node:ekr.20150312225028.143: *7* dt.Dict
def do_Dict(self,tree):
    '''Visit all keys, then all values.'''
    for key in tree.keys:
        self.visit(key)
    for val in tree.values:
        self.visit(val)

#@+node:ekr.20150312225028.144: *7* dt.Ellipsis
def do_Ellipsis(self,tree):
    '''Ellipsis constants yield no deductions.'''

#@+node:ekr.20150312225028.145: *7* dt.ExtSlice
def do_ExtSlice (self,tree):
    '''Visit each dimension of an extended slice.'''
    for dim in tree.dims:
        self.visit(dim)

#@+node:ekr.20150312225028.146: *7* dt.Index
def do_Index (self,tree):
    '''Visit the index value.'''
    self.visit(tree.value)

#@+node:ekr.20150312225028.147: *7* dt.int
def do_int (self,s):
    '''Integer constants yield no deductions.'''

#@+node:ekr.20150312225028.148: *7* dt.Keyword
def do_Keyword (self,tree):
    '''Visit the keyword's name, then its value.'''
    self.visit(tree.arg)
    self.visit(tree.value)

#@+node:ekr.20150312225028.149: *7* dt.List
def do_List(self,tree):
    '''Visit each element, then the expression context.'''
    for elt in tree.elts:
        self.visit(elt)
    self.visit(tree.ctx)

#@+node:ekr.20150312225028.150: *7* dt.ListComp
def do_ListComp(self,tree):
    '''Visit the element expression, then each generator.'''
    self.visit(tree.elt)
    for gen in tree.generators:
        self.visit(gen)
        
#@+node:ekr.20150312225028.151: *7* dt.Name
def do_Name(self,tree):
    '''Record deletions of names; Load/Store/Param contexts need no work.

    NOTE(review): attic code -- cx is not defined in this chunk, so the
    Del branch would raise NameError if it ever ran.
    '''
    name = tree.id # a string.

    # if isPython3:
        # if name in self.u.module_names:
            # return
    # else:
        # if name in dir(__builtin__) or name in self.u.module_names:
            # return
            
    ctx = self.visit(tree.ctx)
            
    if ctx == 'Load': # Most common.
        pass
    elif ctx == 'Store': # Next most common.
        pass
    elif ctx == 'Param':
        pass
    else:
        assert ctx == 'Del',ctx
        cx.del_names.add(name)
        self.u.stats.n_del_names += 1
#@+node:ekr.20150312225028.152: *7* dt.Num
def do_Num(self,tree):
    '''Numeric constants yield no deductions.'''

#@+node:ekr.20150312225028.153: *7* dt.Slice
def do_Slice (self,tree):
    '''Visit the lower, upper and step bounds when present and non-None.'''
    for attr in ('lower', 'upper', 'step'):
        bound = getattr(tree, attr, None)
        if bound is not None:
            self.visit(bound)

#@+node:ekr.20150312225028.154: *7* dt.Str
def do_Str (self,tree):
    '''String constants yield no deductions.'''
#@+node:ekr.20150312225028.155: *7* dt.Subscript
def do_Subscript(self,tree):
    '''Visit the slice, then the expression context.'''
    self.visit(tree.slice)
    self.visit(tree.ctx)

#@+node:ekr.20150312225028.156: *7* dt.Tuple
def do_Tuple(self,tree):
    '''Visit each element, then the expression context.'''
    for elt in tree.elts:
        self.visit(elt)
    self.visit(tree.ctx)
#@+node:ekr.20150312225028.157: *6* dt.Statements
#@+node:ekr.20150312225028.158: *7* dt.Assign
def do_Assign(self,tree):
    '''Create one Deduction per assignment target.'''
    val = self.visit(tree.value)
    for target_tree in tree.targets:
        target = self.visit(target_tree)
        Deduction(tree, self.assign_deducer, target, val)
#@+node:ekr.20150312225028.159: *7* dt.AugAssign
def do_AugAssign(self,tree):
    '''Create a Deduction for an augmented assignment.'''
    deducer = self.visit(tree.op)
    lhs = self.visit(tree.target)
    rhs = self.visit(tree.value)
    Deduction(tree, deducer, lhs, rhs)
#@+node:ekr.20150312225028.160: *7* dt.Call
def do_Call(self,tree):
    '''Visit a Call node and create the corresponding Deduction.

    Bug fix: the Deduction call previously passed the misspelled name
    'starags', which raised NameError whenever this visitor ran.
    '''
    f        = self.visit(tree.func)
    args     = [self.visit(z) for z in tree.args]
    keywords = [self.visit(z) for z in tree.keywords]
    # starargs/kwargs exist only on older ASTs.
    starargs = self.visit(tree.starargs) if hasattr(tree,'starargs') and tree.starargs else []
    kwargs   = self.visit(tree.kwargs) if hasattr(tree,'kwargs') and tree.kwargs else []
    Deduction(tree,self.call_deducer,f,args,keywords,starargs,kwargs)
#@+node:ekr.20150312225028.161: *7* dt.For
def do_For (self,tree):
    '''Visit the target, iterable, body and else-clause of a For statement.'''
    self.visit(tree.target)
    self.visit(tree.iter)
    for stmt in tree.body:
        self.visit(stmt)
    for stmt in tree.orelse:
        self.visit(stmt)
#@+node:ekr.20150312225028.162: *7* dt.Global
def do_Global(self,tree):
    '''Global declarations yield no deductions.'''
#@+node:ekr.20150312225028.163: *7* dt.Import & helpers
def do_Import(self,tree):
    '''Import statements yield no deductions.'''
#@+node:ekr.20150312225028.164: *7* dt.ImportFrom
def do_ImportFrom(self,tree):
    '''ImportFrom statements yield no deductions.'''
#@+node:ekr.20150312225028.165: *7* dt.Lambda & helper
def do_Lambda (self,tree):
    '''Visit only the lambda body; lambda args create no deductions.'''
    self.visit(tree.body)
#@+node:ekr.20150312225028.166: *7* dt.Return
def do_Return(self,tree):
    '''Create a Deduction for a return statement.

    Bug fix: the no-value branch referenced self.return_deducerd
    (trailing 'd'), an attribute name that appears nowhere else; it now
    uses self.return_deducer, matching the valued branch.
    '''
    if tree.value:
        val = self.visit(tree.value)
        Deduction(tree, self.return_deducer, val)
    else:
        Deduction(tree, self.return_deducer)
#@+node:ekr.20150312225028.167: *7* dt.With
def do_With (self,tree):
    '''Visit the context expression, the optional vars, and the body.

    Bug fix: the old code tested hasattr(tree, 'context_expression') but
    then read the misspelled tree.context_expresssion, raising
    AttributeError whenever the attribute was present.
    '''
    if hasattr(tree,'context_expression'):
        self.visit(tree.context_expression)
    if hasattr(tree,'optional_vars'):
        try:
            for z in tree.optional_vars:
                self.visit(z)
        except TypeError: # Not iterable.
            self.visit(tree.optional_vars)
    for z in tree.body:
        self.visit(z)
#@+node:ekr.20150312225028.168: *5* old Deduction ctor
def __init__ (sd,target,aList):
    '''Old Deduction ctor (attic).

    NOTE(review): the first parameter is named sd, yet the body assigns
    to self.*, which is undefined here; trace is also undefined.  This
    code cannot run as written.
    '''
    if trace:
        name,obj = target
        deps = [b.short_description() for a,b in aList]
        g.trace('(Op) lhs: %s, aList: %s' % (name,deps))
    
    self.deps = aList
        # a list tuples (ast,s)
        # describing the symbols on which the target depends.
        # s is a string, either a plain id or an id chain.
        
    self.sd = sd

    self.target = target
        # A tuple (name,object) representing the target (LHS) of an assignment statement.
        # name is the spelling (a string) of the plain id or id chain.
        # object is a Chain for chains; a SymbolTableEntry for plain ids.
        # Note: chain.e is the SymbolTableEntry for chains.
        
    sd.n_dependencies += 1
    
    self.fold()
#@+node:ekr.20150312225028.169: *5* e.become_known (To do)
def remove_symbol (self,e):
    
    '''The type of this SymbolTableEntry has just become known.
    
    Remove e from this Dependency.
    
    If the Dependency becomes known, do the following:
        
    - Call eval_ast to evaluate the type.
    - Assign the type to the Dependency's symbol.
    - Add the symbol to sd.known_types.
    '''
  
    # NOTE(review): the e parameter is immediately shadowed by e = self
    # below, so the argument is never used -- confirm intent.
    e = self
    
    for dep in e.dependencies:
        dep.remove(e) # May add entries to sd.known_types.
    e.dependencies = []

    g.trace(e)
#@+node:ekr.20150312225028.170: *5* e.is_known
def is_known (self):
    '''Return True if this is a known symbol (exactly one possible value).'''
    return len(self.vals) == 1
#@+node:ekr.20150312225028.171: *4*  Inference-related classes
#@+node:ekr.20150312225028.172: *5* class CacheTraverser (AstTraverser)
class CacheTraverser(AstTraverser):
    
    '''A class to report the contents of caches.'''
    
    def __init__(self):
        '''Ctor for CacheTraverser: init the base class and indentation level.'''
        AstTraverser.__init__(self)
        # level controls the indentation of printed cache reports.
        self.level = 0
    
    @others
#@+node:ekr.20150312225028.173: *6* ct.show_cache
def show_cache(self,obj,cache,tag):
    '''Print the non-trivial entries of cache, labeled by tag, indented by self.level.'''
    d = cache
    pad = ' '*2*self.level
    result = []
    for key in sorted(d.keys()):
        aList = d.get(key)
        # Skip entries whose only value reprs as 'Unknown'.
        if len(aList) > 1 or (aList and repr(aList[0]) != 'Unknown'):
            # result.append('  %s%20s => %s' % (pad,key,aList))
            result.append('  %s%s' % (pad,aList))
    if result:
        # Describe obj: format AST nodes, repr everything else; truncate to 40 chars.
        s = self.format(obj) if isinstance(obj,ast.AST) else repr(obj)
        s = s.replace('\n','')
        if len(s) > 40: s = s[:37]+'...'
        if len(result) == 1:
            print('%s%s: %40s -> %s' % (pad,tag,s,result[0].strip()))
        else:
            print('%s%s: %s' % (pad,tag,s))
            for s in result:
                print(s)
#@+node:ekr.20150312225028.174: *6* ct.run
def run (self,node):
    '''Dump all caches for the tree rooted at node.'''
    self.check_visitor_names()
    suffix = ' for %s' % (g.shortFileName(self.fn)) if self.fn else ''
    print('\nDump of caches%s...' % suffix)
    self.visit(node)
#@+node:ekr.20150312225028.175: *6* ct.traversers
#@+node:ekr.20150312225028.176: *7* ct.visit
def visit(self,node):
    """Walk a tree of AST nodes, dumping caches along the way."""
    assert isinstance(node,ast.AST),node.__class__.__name__
    # A do_X method, when present, handles node (and its subtree) itself.
    method = getattr(self, 'do_' + node.__class__.__name__, None)
    if method:
        return method(node)
    self.visit_cache(node)
    # No visitor: recurse into all child nodes automatically.
    for child in self.get_child_nodes(node):
        self.visit(child)
#@+node:ekr.20150312225028.177: *7* ct.visit_cache
def visit_cache(self,node):
    '''Show node.cache and node.e.call_cache when those attributes exist.'''
    if hasattr(node, 'cache'):
        self.show_cache(node, node.cache, 'cache')
    if hasattr(node, 'e') and hasattr(node.e, 'call_cache'):
        self.show_cache(node, node.e.call_cache, 'call_cache')
#@+node:ekr.20150312225028.178: *6* ct.visitors
#@+node:ekr.20150312225028.179: *7* ct.ClassDef
# ClassDef(identifier name, expr* bases, stmt* body, expr* decorator_list)

def do_ClassDef(self,node):
    '''Print the class line, then dump the children one level deeper.'''
    indent = ' '*2*self.level
    bases = ','.join([self.format(z) for z in node.bases])
    print('%sclass %s(%s)' % (indent,node.name,bases))
    self.level += 1
    try:
        self.visit_children(node)
    finally:
        # Restore the level even if a child visitor raises.
        self.level -= 1
#@+node:ekr.20150312225028.180: *7* ct.functionDef
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,node):
    '''Print the def line, then dump the children one level deeper.'''
    indent = ' '*2*self.level
    print('%sdef %s(%s)' % (indent,node.name,self.format(node.args)))
    self.level += 1
    try:
        self.visit_children(node)
    finally:
        # Restore the level even if a child visitor raises.
        self.level -= 1
#@+node:ekr.20150312225028.181: *5* class ChainPrinter (OpPatternFormatter)
class ChainPrinter: ### (OpPatternFormatter):
    
    '''A formatter that collects attribute chains per base name.'''

    def __init__ (self,fn):
    
        # d maps base names to lists of chain-part lists (see do_Attribute).
        self.d = {}
        # True: the next Attribute visited is the outermost one.
        self.top_attribute = True
    
        ### OpPatternFormatter.__init__ (self)
            # Init the base class.

    @others
#@+node:ekr.20150312225028.182: *6* Attribute
# Attribute(expr value, identifier attr, expr_context ctx)

def do_Attribute(self,node):
    '''Format an attribute chain and record non-trivial chains in self.d.'''
    was_top = self.top_attribute
    try:
        # Nested Attribute visits must know they are not the outermost one.
        self.top_attribute = False
        s = '%s.%s' % (
            self.visit(node.value),
            self.visit(node.attr))
    finally:
        self.top_attribute = was_top
    if was_top:
        parts = s.split('.')
        if parts:
            name, rest = parts[0], parts[1:]
            # self.x is trivial; self.x.y and anything off another name is not.
            interesting = (
                (name == 'self' and len(rest) > 1) or
                (name != 'self' and len(rest) > 0))
            if interesting:
                chains = self.d.get(name, [])
                if rest not in chains:
                    chains.append(rest)
                    self.d[name] = chains
    return s
#@+node:ekr.20150312225028.183: *6* showChains
def showChains(self):
    '''Summarize recorded chains; return (n_plain, n_complex, report).'''
    verbose = False
    lines = []
    n_plain, n_complex = 0, 0
    for key in sorted(self.d.keys()):
        for chain in sorted(self.d.get(key)):
            s = '.'.join(chain)
            # Chains containing calls or subscripts are the interesting ones.
            if any(ch in s for ch in '([{'):
                lines.append('%s.%s' % (key, s))
                n_complex += 1
            else:
                if verbose:
                    lines.append('%s.%s' % (key, s))
                n_plain += 1
    return n_plain, n_complex, '\n'.join(lines)

#@+node:ekr.20150312225028.184: *5* class Context & subclasses
<< define class Context >>

@others
#@+node:ekr.20150312225028.185: *6* << define class Context >>
class Context:

    '''The base class of all Context objects.
    Contexts represent static scopes.'''

    # Subclasses set self.kind and self.name (see the ctor).
    @others
#@+node:ekr.20150312225028.186: *7*  cx ctor
def __init__(self,parent_context):
    '''Ctor for the Context base class.'''
    # NOTE(review): u is not defined in this chunk -- presumably a
    # module-level utils instance; confirm before resurrecting.
    self.format = u.format
    self.kind = '<Unknown context kind>' # All subclasses set this.
    self.name = '<Unknown context name>' # All subclasses set this.
    self.parent_context = parent_context
    self.st = SymbolTable(cx=self)
    self.stats = Stats()
    self.stats.n_contexts += 1

    # Public semantic data: accessed via getters.
    self.assignments_list = [] # All assignment statements.
    self.calls_list = [] # All call statements defined in this context.
    self.classes_list = [] # Classes defined in this context.
    self.defs_list = [] # Functions defined in this context.
    self.expressions_list = [] # Expr nodes in this context.
    self.definitions_of = [] # Assignments, imports and arguments that define this symbol.
    self.imported_symbols_list = [] # All imported symbols.
    self.methods_list = [] # # All methods of a class context.  Elements are DefContexts.
    self.returns_list = [] # List of all return statements in the context.
    self.statements_list = [] # List of *all* statements in the context.
    self.yields_list = [] # List of all yield statements in the context.

    # Private semantic data: no getters.
    self.n_lambdas = 0
        # Number of lambdas in this context:
        # Used to synthesize names of the form 'Lambda@@n'
    self.defining_context = self
    # self.global_names = set()
        # Names that appear in a global statement in this context.
    self.node = None
        # The AST tree representing this context.
#@+node:ekr.20150312225028.187: *7* cx.__getstate__
def __getstate__(self):
    '''Return the representation of the Context class for use by pickle.'''
    sources = (
        ('calls', self.calls_list),
        ('classes', self.classes_list),
        ('defs', self.defs_list),
        ('statements', self.statements()),
    )
    return dict((key, [repr(z) for z in items]) for key, items in sources)
#@+node:ekr.20150312225028.188: *7* cx.__hash__
# Important: Define __hash__ only if __eq__ is also defined.

def __hash__ (self):
    '''Hash contexts by identity, matching the identity-based __eq__ below.'''
    return id(self)

# This is defined below...

# def __eq__ (self,other):
    # return id(self) == id(other)
#@+node:ekr.20150312225028.189: *7* cx.__repr__ & __str__
def __repr__ (self):
    '''Show a context as a Cx tag plus its (identity-based) id.'''
    return 'Cx:id(%s)' % id(self)
    
__str__ = __repr__
#@+node:ekr.20150312225028.190: *7* cx.__eq__ & __ne__(others return NotImplemented)
# Py3k wants __lt__ etc, and Py2k needs all of them defined.

# Use identity only for contexts!
# Equality/inequality are identity; ordering is deliberately unsupported.
def __lt__(self, other): return NotImplemented 
def __le__(self, other): return NotImplemented 
def __eq__(self, other): return id(self) == id(other)
def __ne__(self, other): return id(self) != id(other)
def __gt__(self, other): return NotImplemented 
def __ge__(self, other): return NotImplemented 

# if 1:
    # # Ignore case in comparisons.
    # def __lt__(self, other): return self.name.lower() <  other.name.lower()
    # def __le__(self, other): return self.name.lower() <= other.name.lower()
    # def __eq__(self, other): return self.name.lower() == other.name.lower()
    # def __ne__(self, other): return self.name.lower() != other.name.lower()
    # def __gt__(self, other): return self.name.lower() >  other.name.lower()
    # def __ge__(self, other): return self.name.lower() >= other.name.lower()
# else:
    # def __lt__(self, other): return self.name <  other.name
    # def __le__(self, other): return self.name <= other.name
    # def __eq__(self, other): return self.name == other.name
    # def __ne__(self, other): return self.name != other.name
    # def __gt__(self, other): return self.name >  other.name
    # def __ge__(self, other): return self.name >= other.name
#@+node:ekr.20150312225028.191: *7* cx.description & short_description
def description (self):
    '''Return a description of this context and all parent contexts.'''
    parent = self.parent_context
    if parent:
        return '%s:%s' % (parent.description(), repr(self))
    return repr(self)

def short_description(self):
    '''Return the description of just this context.'''
    return repr(self)
#@+node:ekr.20150312225028.192: *7* cx.dump_statements
def dump_statements(self,var_filter=None):
    '''Return the dumps of all local statements, one per line,
    skipping blank dumps.  var_filter is currently unused.'''
    dumps = [self.u.dump_ast(node) for node in self.local_statements()]
    return '\n'.join([z for z in dumps if z.strip()])
#@+node:ekr.20150312225028.193: *7* cx.full_name
def full_name (self):
    '''Return a context name for compatibility with HTMLReportTraverser.'''
    # A hack: must match the name generated in rt.report().
    if self.name == '<string>':
        return 'report_writer_test'
    return self.name
#@+node:ekr.20150312225028.194: *7* cx.generators & getters
# Unlike in leoInspect, most of these getters return lists of Statement objects.
#@+node:ekr.20150312225028.195: *8* cx.assignments
# This is really a helper for assignments_to/using.
def assignments(self):
    '''Return all assignment statements in this and all nested contexts.
    (A helper for assignments_to/assignments_using.)'''
    return [node for cx in self.contexts() for node in cx.assignments_list]
#@+node:ekr.20150312225028.196: *8* cx.assignments_to (rewritten)
def assignments_to (self,s):
    '''Return the formatted statements that assign to the name s,
    covering Assign, AugAssign, For and ListComp targets.

    NOTE(review): the final set() de-duplication makes the return
    order nondeterministic.
    '''
    cx = self
    result = []
    for node in cx.assignments():
        statement = cx.u.format(node)
        kind = cx.u.kind(node)
        if kind == 'Assign':
            #  Assign(expr* targets, expr value)
            for target in node.targets:
                kind2 = cx.u.kind(target)
                if kind2 == 'Name':
                    if s == target.id:
                        result.append(statement)
                elif kind2 == 'Tuple':
                    # Tuple(expr* elts, expr_context ctx)
                    for item2 in target.elts:
                        if cx.u.kind(item2) == 'Name' and s == item2.id:
                            result.append(statement)
        elif kind == 'AugAssign':
            kind2 = cx.u.kind(node.target)
            if kind2 == 'Name':
                if s == node.target.id:
                    result.append(statement)
        elif kind == 'For':
            # Parse the loop targets out of the formatted 'for ... in ...' text.
            s2 = statement
            i = s2.find(' in ')
            assert s2.startswith('for ')
            assert i > -1
            s2 = s2[4:i].strip('()')
            aList = s2.split(',')
            if s in aList:
                i = statement.find(':\n')
                assert i > -1
                result.append(statement[:i+1])
        elif kind == 'ListComp':
            # node.generators is a comprehension.
            for item in node.generators:
                target = item.target
                kind2 = cx.u.kind(target)
                if kind2 == 'Name':
                    if s == target.id:
                        result.append(statement)
                elif kind2 == 'Tuple':
                    for item2 in target.elts:
                        if cx.u.kind(item2) == 'Name' and s == item2.id:
                            result.append(statement)
                            break
                else:
                    assert False,kind2
        else:
            assert False,kind
    return list(set(result))
#@+node:ekr.20150312225028.197: *8* cx.assignments_using
def assignments_using (self,s):
    '''Return the assignment nodes whose right-hand side uses the word s.'''
    result = []
    for node in self.assignments():
        assert node.kind in ('Assign','AugAssign'),node.kind
        val = node.value
        rhs = self.format(val)
        i = rhs.find(s,0)
        while -1 < i < len(rhs):
            if g.match_word(rhs,i,s):
                result.append(node)
                break
            else:
                # NOTE(review): this advances blindly by len(s) instead of
                # re-searching with rhs.find(s, i+len(s)); later probes may
                # not land on occurrences of s at all -- looks buggy; confirm.
                i += len(s)

    return result
#@+node:ekr.20150312225028.198: *8* cx.call_args_of
def call_args_of (self,s):
    '''Return all Call nodes whose formatted callee equals s.'''
    result = []
    for node in self.calls():
        assert node.kind == 'Call'
        # TODO(old): should return only the args, not the whole node.
        if self.format(node.func) == s:
            result.append(node)
    return result
#@+node:ekr.20150312225028.199: *8* cx.calls
# This is really a helper for calls_to/call_args_of.
def calls(self):
    '''Return all Call statements in this and all nested contexts.
    (A helper for calls_to/call_args_of.)'''
    return [node for cx in self.contexts() for node in cx.calls_list]
#@+node:ekr.20150312225028.200: *8* cx.calls_to
def calls_to (self,s):

    '''Return every Call node whose callee formats to exactly s.'''

    matches = []
    for call in self.calls():
        assert call.kind == 'Call'
        if s == self.format(call.func):
            matches.append(call)
    return matches
#@+node:ekr.20150312225028.201: *8* cx.classes
def classes (self):

    '''Return every class context recorded in any context.'''

    found = []
    for context in self.contexts():
        found.extend(context.classes_list)
    return found
#@+node:ekr.20150312225028.202: *8* cx.contexts & getters
def contexts (self,name=None):

    '''A generator yielding this context and all descendant contexts.

    If name is given, yield only contexts with the given name.'''

    if name is None or name == self.name:
        yield self

    # Recurse into nested classes first, then nested defs,
    # never re-yielding the receiver itself.
    for child in self.classes_list + self.defs_list:
        for descendant in child.contexts(name=name):
            if descendant != self:
                yield descendant
#@+node:ekr.20150312225028.203: *9* get_contexts and get_unique_context
# These getters are designed for unit testing.
def get_contexts(self,name):

    '''Return the contexts having the given name.

    Return the single context when there is exactly one match;
    otherwise return the (possibly empty) list of matches.'''

    matches = list(self.contexts(name=name))
    if len(matches) == 1:
        return matches[0]
    return matches
   
def get_unique_context (self,name):

    '''Return the unique context having the given name.

    Raise AssertionError unless there is exactly one match.'''

    matches = list(self.contexts(name=name))
    assert len(matches) == 1,matches
    return matches[0]
#@+node:ekr.20150312225028.204: *8* cx.defs
def defs (self):

    '''Return every def context recorded in any context.'''

    found = []
    for context in self.contexts():
        found.extend(context.defs_list)
    return found
#@+node:ekr.20150312225028.205: *8* cx.parent_contexts
def parent_contexts (self):

    '''Return the chain of ancestor contexts, outermost first.'''

    ancestors = []
    context = self
    while context.parent_context:
        ancestors.append(context.parent_context)
        context = context.parent_context
    ancestors.reverse()
    return ancestors
#@+node:ekr.20150312225028.206: *8* cx.returns (dubious)
# Using cx.returns_list will almost always be correct.

if 0:
    # Disabled code, kept only for reference.

    def returns (self):

        '''Return all return statements in the present context and all descendant contexts.'''

        result = []
        for cx in self.contexts():
            result.extend(cx.returns_list)
        return result
#@+node:ekr.20150312225028.207: *8* cx.statements (new)
def statements (self):

    '''A generator yielding all statements of the receiver context, in order.'''

    assert self.kind in ('class','def','lambda','module')
    for statement in self.statements_list:
        yield statement
#@+node:ekr.20150312225028.208: *8* cx.symbol_tables & getters
def symbol_tables (self,name=None):

    '''Yield the symbol table of every context.

    If name is given (and non-empty), yield only the symbol tables
    of contexts with that name.'''

    for context in self.contexts():
        if not name or name == context.name:
            yield context.st

#@+node:ekr.20150312225028.209: *9* get_symbol_tables and get_unique_symbol_table
# These getters are designed for unit testing.
def get_symbol_tables (self,name):

    '''Return the symbol tables having the given name.

    Return the single table when there is exactly one match;
    otherwise return the (possibly empty) list of matches.'''

    tables = list(self.symbol_tables(name=name))
    if len(tables) == 1:
        return tables[0]
    return tables
   
def get_unique_symbol_table (self,name):

    '''Return the unique symbol table having the given name.

    Raise AssertionError unless there is exactly one match.'''

    tables = list(self.symbol_tables(name=name))
    assert len(tables) == 1,tables
    return tables[0]
#@+node:ekr.20150312225028.210: *8* cx.symbol_table_entries
def symbol_table_entries (self,name):

    '''Yield the symbol-table entry for name in every context that defines it.'''

    for cx2 in self.contexts():
        e = cx2.st.d.get(name)
        if e:
            # Yield the entry already fetched instead of performing a
            # second dictionary lookup (the old code called d.get twice).
            yield e
#@+node:ekr.20150312225028.211: *8* cx.local_statements
def local_statements(self):

    '''Return the top-level statements of a context.'''

    assert self.kind in ('class','def','lambda','module')
    return self.node.body
#@+node:ekr.20150312225028.212: *7* cx.line_number (not used)
# def line_number (self):
    
    # return self.tree_ptr.lineno
#@+node:ekr.20150312225028.213: *7* cx.token_range (TO DO) (Uses tree_ptr)
# def token_range (self):
    
    # tree = self.tree_ptr
    
    # # return (
        # # g.toUnicode(self.byte_array[:tree.col_offset]),
        # # g.toUnicode(self.byte_array[:tree_end_col_offset]),
    # # )
    
    # if getattr(tree,'col_offset',None):
        # return tree.lineno,tree.col_offset,tree.end_lineno,tree.end_col_offset
    # else:
        # return -1,-1
#@+node:ekr.20150312225028.214: *6* class ClassContext
class ClassContext (Context):

    '''A class to hold semantic data about a class.

    Methods are supplied by Leo's @others directive.'''

    @others

#@+node:ekr.20150312225028.215: *7* ClassContext.__init__
def __init__(self,u,parent_context,name,node,bases):

    '''Ctor for ClassContext class.'''
    # NOTE(review): check that all call sites pass the u argument.

    Context.__init__(self,u,parent_context)
        # Init the base class.

    self.ctor = None # Filled in when def __init__ seen.
    self.kind = 'class'
    self.bases = bases # A list of ast.Name nodes?
    self.name = name
    self.class_context  = self
    self.def_context = self.parent_context.def_context
    self.ivars_dict = {} # Keys are names, values are reaching sets.
    self.module_context = self.parent_context.module_context
    self.node = node
    u.stats.n_classes += 1
#@+node:ekr.20150312225028.216: *7* ClassContext.__repr__& __str__
def __repr__ (self):

    '''Return a string such as Cx:class Name(Base1,Base2).'''

    if self.bases:
        bases = [self.format(z) for z in self.bases]
        return 'Cx:class %s(%s)' % (self.name,','.join(bases))
    else:
        return 'Cx:class %s' % (self.name)

__str__ = __repr__        
#@+node:ekr.20150312225028.217: *7* ClassContext.short_description
def short_description(self):

    '''Return a one-line class header for this class context.'''

    if not self.bases:
        return 'class %s:' % (self.name)
    formatted = ','.join(self.format(z) for z in self.bases)
    return 'class %s(%s):' % (self.name,formatted)
#@+node:ekr.20150312225028.218: *6* class DefContext
class DefContext (Context):

    '''A class to hold semantic data about a function/method.

    Methods are supplied by Leo's @others directive.'''

    @others

    
#@+node:ekr.20150312225028.219: *7* DefContext.__init__
def __init__(self,u,parent_context,name):

    '''Ctor for DefContext class.'''
    # NOTE(review): check that all call sites pass the u argument.
    Context.__init__(self,u,parent_context)
    self.kind = 'def'
    self.name = name
    self.args = None # Must be set later.
    self.class_context = self.parent_context.class_context
    self.def_context = self
    self.module_context = self.parent_context.module_context
    self.node = None
    u.stats.n_defs += 1
#@+node:ekr.20150312225028.220: *7* DefContext.__repr__ & __str__
def __repr__ (self):

    '''Return a string such as Cx:def name(args).'''

    args = self.format(self.args) if self.args else  '<**no args yet**>'

    return 'Cx:def %s(%s)' % (self.name,args)

__str__ = __repr__        
#@+node:ekr.20150312225028.221: *7* DefContext.short_description
def short_description (self):

    '''Return a one-line def header for this function context.'''

    if self.args:
        return 'def %s(%s):' % (self.name,self.format(self.args))
    return 'def %s(%s):' % (self.name,'')
#@+node:ekr.20150312225028.222: *6* class LambdaContext
class LambdaContext (Context):

    '''A class to represent the range of a 'lambda' statement.'''

    def __init__(self,u,parent_context,name):
        '''Ctor for LambdaContext class.'''
        Context.__init__(self,u,parent_context)
        self.kind = 'lambda'
        self.name = name # Set to 'Lambda@@n' by the caller.
        self.args = None # Patched in later.
        self.node = None
        # Inherit the enclosing class/def/module contexts.
        self.class_context  = self.parent_context.class_context
        self.def_context    = self.parent_context.def_context
        self.module_context = self.parent_context.module_context
        u.stats.n_lambdas += 1

    def __repr__ (self):
        '''Return a lambda signature; args may not be known yet.'''
        if self.args:
            formatted = ','.join(self.format(z) for z in self.args)
        else:
            formatted = 'None'
        return 'Cx:lambda %s:' % (formatted)

    __str__ = __repr__
#@+node:ekr.20150312225028.223: *6* class LibraryModuleContext
class LibraryModuleContext (Context):

    '''A class to hold semantic data about a library module.'''

    def __init__(self,u,fn):
        '''Ctor for LibraryModuleContext class.'''
        Context.__init__(self,u,parent_context=None)
        self.kind = 'module'
        self.class_context  = None
        self.def_context    = None
        self.fn = g.os_path_abspath(fn)
        self.module_context = self
        self.module_type = Module_Type(u,self,node=None)
            # The singleton *constant* type of this module.
        if fn.find('.') > -1:
            # NOTE(review): [:-3] assumes a three-character extension
            # such as '.py' — confirm for other extensions.
            self.name = g.shortFileName(self.fn)[:-3]
        else:
            self.name = fn
        self.node = None
        u.stats.n_library_modules += 1

    def __repr__ (self):
        '''Return a string such as Cx:module(name).'''
        return 'Cx:module(%s)' % self.name

    __str__ = __repr__        
#@+node:ekr.20150312225028.224: *6* class ModuleContext
class ModuleContext (Context):

    '''A class to hold semantic data about a module.'''

    def __init__(self,u,fn,node):
        '''Ctor for ModuleContext class.'''
        Context.__init__(self,u,parent_context=None)
        self.kind = 'module'
        self.class_context  = None
        self.def_context    = None
        self.fn = g.os_path_abspath(fn)
        self.module_context = self
        self.module_type = Module_Type(u,self,node)
            # The singleton *constant* type of this module.
        if fn.find('.') > -1:
            # NOTE(review): [:-3] assumes a three-character extension
            # such as '.py' — confirm for other extensions.
            self.name = g.shortFileName(self.fn)[:-3]
        else:
            self.name = fn
        self.node = node
        u.stats.n_modules += 1

    def __repr__ (self):
        '''Return a string such as Cx:module(name).'''
        return 'Cx:module(%s)' % self.name

    __str__ = __repr__        
#@+node:ekr.20150312225028.225: *5* class ScopeBinder
# Running ScopeBinder on all Leo files:
# 1.25sec when this class is a subclass of AstTraverser
# 0.75sec when this class is a subclass of AstFullTraverser.

class ScopeBinder(AstFullTraverser):
    
    '''Resolve all symbols to the scope in which they are defined.
    
    This pass is invoked by P1; it must run after P1 has injected
    all fields into the trees and discovered all definitions.

    Remaining methods are supplied by Leo's @others directive.
    '''
    
    def __init__(self):
        '''Ctor for ScopeBinder class.'''
        AstFullTraverser.__init__(self)
        self.init_dicts()
        self.u = Utils()

    @others
#@+node:ekr.20150312225028.226: *6* sb.check & helper
def check(self,fn,root):

    '''Print a report of all unresolved names found under root.'''

    trace = True and not g.app.runningAllUnitTests
    u = self.u
    for cx in u.contexts(root):
        assert hasattr(cx,'stc_context'),cx
        problems = []
        self.check_context(cx,problems)
        if trace and problems:
            problems = sorted(set(problems))
            indent = ' '*u.compute_node_level(cx)
            problems.insert(0,'%s%s' % (indent,u.format(cx)))
            if fn:
                problems.insert(0,fn)
            for line in problems:
                print(line)
#@+node:ekr.20150312225028.227: *7* check_context
def check_context(self,cx,result):

    '''Append to result one line for every Name in cx that has no
    visible definition in any enclosing scope.'''

    trace = False
    u = self.u
    for statement in u.local_statements(cx):
        if trace:
            pad = ' '*u.compute_node_level(statement)
            print(pad+u.format(statement))
        for node in u.local_nodes(statement):
            if isinstance(node,ast.Name):
                key = node.id
                def_cx = getattr(node,'stc_scope')
                if def_cx:
                    d = def_cx.stc_symbol_table
                    aSet = d.get('*defined*')
                    if key not in aSet:
                        # UnboundLocalError: no definition in scope.
                        pad = ' '*u.compute_node_level(statement)
                        result.append(' %s*%s %s: %s' % (
                            pad,u.format(node.ctx),key,u.format(statement)))
                else:
                    d = self.lookup(cx,key)
                    if d:
                        # Bug fix: dict.has_key was removed in Python 3;
                        # use the 'in' operator instead.
                        assert key in d,repr(key)
                    else:
                        # No scope.
                        pad = ' '*u.compute_node_level(statement)
                        result.append(' %s+%s %s: %s' % (
                            pad,u.format(node.ctx),key,u.format(statement)))
#@+node:ekr.20150312225028.228: *6* sb.dump_symbol_table
def dump_symbol_table(self,node):

    '''Print each entry of node's symbol table, if it has one.'''

    d = getattr(node,'stc_symbol_table',None)
    if d:
        for key in sorted(d.keys()):
            entry = d.get(key)
            print('%s:%s' % (self.format(entry.ctx),entry.id))
#@+node:ekr.20150312225028.229: *6* sb.init_dicts
def init_dicts(self):

    '''Initialize the builtin-name and special-method lookup dictionaries.'''

    # Bug fix: outside __main__, __builtins__ may be the builtins *module*
    # (not a dict), and iterating a module raises TypeError.
    # Import the builtins module explicitly instead.
    import builtins
    self.builtins_d = {z: z for z in dir(builtins)}

    table = (
        '__builtins__',
        '__file__',
        '__path__',
        '__repr__',
    )
    self.special_methods_d = {z: z for z in table}
#@+node:ekr.20150312225028.230: *6* sb.lookup
def lookup(self,cx,key):

    '''Return the symbol-table dict that defines key, searching cx and
    its enclosing contexts, then the builtins and special-methods dicts.

    Return None if key is not found anywhere.'''

    trace = False and not g.app.runningAllUnitTests
    assert isinstance(cx,(ast.Module,ast.ClassDef,ast.FunctionDef,ast.Lambda)),cx
    cx2 = cx
    while cx2:
        # Bug fix: search the symbol table of cx2, the context being
        # examined.  The old code always read cx.stc_symbol_table, so
        # enclosing scopes were never actually searched.
        st = cx2.stc_symbol_table
        if key in st.d:
            return st.d
        cx2 = cx2.stc_context
        assert cx2 is None or isinstance(
            cx2,(ast.Module,ast.ClassDef,ast.FunctionDef,ast.Lambda)),cx2
    for d in (self.builtins_d,self.special_methods_d):
        if key in d:
            return d
    if trace:
        g.trace('** (ScopeBinder) no definition for %20s in %s' % (
            key,self.u.format(cx)))
    return None
#@+node:ekr.20150312225028.231: *6* sb.run
def run (self,fn,root):

    '''Entry point: visit the tree rooted at root, binding all names.'''

    self.fn = g.shortFileName(fn)
    self.n_resolved = 0
    self.n_visited = 0
    self.visit(root)
    if 0:
        # Debug only: report unresolved names.
        self.check(fn,root)
        # g.trace('ScopeBinder visited %s nodes' % self.n_visited)
#@+node:ekr.20150312225028.232: *6* sb.visit & visitors
def visit(self,node):

    '''Dispatch node to its do_<ClassName> visitor, counting visits.'''

    visitor = getattr(self,'do_' + node.__class__.__name__)
    self.n_visited += 1
    return visitor(node)
#@+node:ekr.20150312225028.233: *7* sb.FunctionDef
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,node):

    '''Visit the arguments, body and decorators of a function definition.'''

    self.visit(node.args)
    for statement in node.body:
        self.visit(statement)
    for decorator in node.decorator_list:
        self.visit(decorator)
#@+node:ekr.20150312225028.234: *7* sb.Name
# Name(identifier id, expr_context ctx)

def do_Name(self,node):

    '''Resolve node to its defining scope if P1 did not already do so.'''

    trace = False and not g.app.runningAllUnitTests

    if node.stc_scope is not None:
        return # P1 has already determined the scope.
    # Search for the defining context.
    self.n_resolved += 1
    cx = node.stc_context
        # cx will be None if cx is an ast.Module.
        # In that case, self.lookup will search the builtins.
    d = self.lookup(cx,node.id)
    if d is None and trace:
        print('%s undefined name: %s' % (self.fn,node.id))
#@+node:ekr.20150312225028.235: *5* class Pass1 (AstFullTraverser)
class Pass1 (AstFullTraverser):
    
    ''' Pass1 traverses an entire AST tree, creating symbol
    tables and context objects, injecting pointers to them
    in the tree. This pass also resolves Python names to
    their proper context, following the rules of section
    4.1, Naming and binding, of the Python language
    reference.
    
    Pass1 uses the tree-traversal code from the AstFullTraverser
    base class. As a result, not all AST nodes need to be
    visited explicitly.
    
    Pass 1 injects the following fields into ast.AST nodes::

    for N in cx: refs_list: N.e = e
    for N in (ast.Class, ast.FunctionDef and ast.Lambda): N.new_cx = new_cx
    For ast.Name nodes N: N.e = e ; N.cx = cx
    For all operator nodes N: N.op_name = <spelling of operator>

    For every context C, Pass 1 sets the following ivars of C:
        C.node                  <node defining C>
        C.ivars_dict            Dictionary of ivars.
                                Keys are names, values are reaching sets (set by SSA pass)
        C.assignments_list      All assignment statements in C
        C.calls_list            All call statements defined in C.
        C.classes_list          All classes defined in C.
        C.defs_list             All functions defined in C.
        C.expressions_list      All Expr nodes in C.
        C.returns_list          All return statements in C.
        C.yields_list           All yield statements in C.
    '''

    @others
#@+node:ekr.20150312225028.236: *6*  p1.ctor
def __init__(self):

    '''Ctor for the Pass1 class.'''

    # Init the base class.
    # Bug fix: Pass1 subclasses AstFullTraverser, so call that ctor
    # (the old code called AstTraverser.__init__).
    AstFullTraverser.__init__(self)

    # Abbreviations.
    self.stats = Stats()
    self.u = Utils()
    # Bug fix: the old code read the undefined global name u (NameError);
    # the abbreviation must come from self.u.
    self.format = self.u.format

    self.in_attr = False
        # True: traversing inner parts of an AST.Attribute tree.
#@+node:ekr.20150312225028.237: *6*  p1.run (entry point)
def run (self,root):

    '''Entry point: traverse the entire tree rooted at root.'''

    self.visit(root)
#@+node:ekr.20150312225028.238: *6*  p1.visit
def visit(self,node):
    
    """Walk a tree of AST nodes, injecting _parent entries into the tree."""
    
    assert isinstance(node,ast.AST),node.__class__.__name__
    node._parent = self.parents[-1]
    if self.context_stack:
        node.cx = self.context_stack[-1]

    # Maintain the level counter and parents stack across the recursion.
    self.level += 1
    self.parents.append(node)

    method_name = 'do_' + node.__class__.__name__
    # stat_name = 'n_' + node.__class__.__name__ + '_nodes'
    method = getattr(self,method_name,None)
    if method:
        # method is responsible for traversing subtrees.
        val = method(node)
    else:
        # Traverse subtrees automatically.
        val = None
        for child in self.get_child_nodes(node):
            val = self.visit(child)
            # val is the value returned by the *last* child visited.
            
    self.level -= 1
    self.parents.pop()
    return val
#@+node:ekr.20150312225028.239: *6* p1.helpers
#@+node:ekr.20150312225028.240: *7* p1.bind_name
def bind_name(self,new_cx,old_cx,old_e,name):
    
    '''Rebind name: make every reference that points to old_e (the
    undefined entry in old_cx) point to new_e, the defining entry
    for name in new_cx.  No-op if name is not defined in new_cx.'''

    trace = False

    new_e = new_cx.st.d.get(name)
    if not new_e:
        # Not an error: name is not defined in new_cx.
        return

    assert old_e
    if old_e == new_e:
        return
        
    if trace and old_e.defs_list:
        g.trace('*****',old_e.defs_list)
        
    if trace:
        g.trace('%22s old_cx: %20s new_cx: %20s' % (name,old_cx,new_cx))

    assert old_cx.st.d.get(name) == old_e
    assert not old_e.defined
    self.stats.n_relinked_names += 1

    # Change all the references to old_e to references to new_e.
    for node in old_e.refs_list:
        kind = self.kind(node)
        assert kind in ('Builtin','Import','ImportFrom','Name'),kind
        setattr(node,'e',new_e)
        self.stats.n_relinked_pointers += 1

    # Merge the reference_lists.
    new_e.refs_list.extend(old_e.refs_list)

    # Relocate the old symbol table entry.
    old_cx.st.d[name] = new_e
#@+node:ekr.20150312225028.241: *7* p1.bind_unbound_name
def bind_unbound_name(self,name,cx):

    '''Name has just been bound in context cx: bind all matching
    unbound names in cx's descendant contexts.'''

    # Important: this method has little or no effect on overall speed.
    for descendant in cx.contexts():
        if descendant != cx:
            entry = descendant.st.d.get(name)
            if entry and not entry.defined:
                self.bind_name(cx,descendant,entry,name)
#@+node:ekr.20150312225028.242: *7* p1.def_args_helper
# arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def def_args_helper (self,cx,def_e,node):

    '''Define the formal parameters of a def (or lambda) in context cx.'''

    assert self.kind(node) == 'arguments'
    self.visit_list(node.args)
    self.visit_list(node.defaults)
    # node.vararg and node.kwarg are identifier strings (or None).
    for field in ('vararg','kwarg'):
        param = getattr(node,field,None)
        if param:
            cx.st.define_name(param)
            self.stats.n_param_names += 1
#@+node:ekr.20150312225028.243: *7* p1.get_import_names
def get_import_names (self,node):

    '''Return a list of (name, asname) pairs for an import statement.'''

    pairs = []
    for alias in node.names:
        if self.kind(alias) == 'alias':
            pairs.append((alias.name,alias.asname))
        else:
            g.trace('unsupported kind in Import.names list',self.kind(alias))
    return pairs
#@+node:ekr.20150312225028.244: *7* p1.resolve_import_name
def resolve_import_name (self,spec):

    '''Return the full path name corresponding to the import spec.'''

    # NOTE(review): uses the deprecated imp module (removed in Python 3.12);
    # importlib.util.find_spec is the modern replacement.

    trace = False ; verbose = False

    if not spec:
        if trace: g.trace('no spec')
        return ''
    
    ### This may not work for leading dots.
    aList,path,paths = spec.split('.'),None,None

    for name in aList:
        try:
            f,path,description = imp.find_module(name,paths)
            if not path: break
            paths = [path]
            if f: f.close()
        except ImportError:
            # Important: imports can fail due to Python version.
            # Thus, such errors are not necessarily serious.
            if trace: g.trace('failed: %s paths: %s cx: %s' % (
                name,paths,self.get_context()))
            path = None
            break
            
    if trace and verbose: g.trace(name,path)
            
    if not path:
        if trace: g.trace('no path')
        return ''

    # Compiled extension modules (.pyd) can not be analyzed.
    if path.endswith('.pyd'):
        if trace: g.trace('pyd: %s' % path)
        return ''
    else:
        if trace: g.trace('path: %s' % path)
        return path
#@+node:ekr.20150312225028.245: *6* p1.visitors
#@+node:ekr.20150312225028.246: *7* p1.Assign
def do_Assign(self,node):

    '''Record an assignment statement in the current context.'''

    context = self.get_context()
    self.stats.n_assignments += 1
    self.visit_children(node)
    context.statements_list.append(node)
    context.assignments_list.append(node)
#@+node:ekr.20150312225028.247: *7* p1.Attribute
# Attribute(expr value, identifier attr, expr_context ctx)

def do_Attribute(self,node):

    '''Record a reference to the base name of an attribute chain.'''

    cx = self.get_context()
    self.stats.n_attributes += 1
    old_attr,self.in_attr = self.in_attr,True
    ctx = self.kind(node.ctx)
        # NOTE(review): ctx is never used below — confirm it can be removed.
    self.visit_children(node)
    self.in_attr = old_attr
    # Only the *outermost* Attribute node of a chain records the base name.
    if not self.in_attr:
        base_node = self.attribute_base(node)
        assert base_node
        kind = self.kind(base_node)
        if kind in ('Builtin','Name'):
            base_name = base_node.id
            assert base_node and base_name
            e = cx.st.add_name(base_name)
            e.refs_list.append(base_node)
            ### e.add_chain(base,node) ### ?
        elif kind in ('Dict','List','Num','Str','Tuple',):
            pass
        elif kind in ('BinOp','UnaryOp'):
            pass
        else:
            assert False,kind
#@+node:ekr.20150312225028.248: *7* p1.AugAssign
def do_AugAssign(self,node):

    '''Record an augmented assignment in the current context.'''

    self.stats.n_assignments += 1
    context = self.get_context()
    self.visit_children(node)
    context.statements_list.append(node)
    context.assignments_list.append(node)
#@+node:ekr.20150312225028.249: *7* p1.Call (Stats only)
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)

def do_Call(self,node):

    '''Record a call node and tally its actual argument count.'''

    context = self.get_context()
    self.stats.n_calls += 1
    context.calls_list.append(node)

    # Count positional args, plus one each for *args and **kwargs if present.
    # NOTE(review): node.starargs/node.kwargs exist only in pre-3.5 ASTs.
    count = len(node.args or []) + int(bool(node.starargs)) + int(bool(node.kwargs))
    tally = self.stats.actual_args_dict
    tally[count] = 1 + tally.get(count,0)

    self.visit_children(node)
#@+node:ekr.20150312225028.250: *7* p1.ClassDef
# ClassDef(identifier name, expr* bases, stmt* body, expr* decorator_list)

def do_ClassDef (self,node):

    '''Create a context for a class, and
    define the class name in the present context.'''

    old_cx = self.get_context()
    name = node.name

    # Bug fix: ClassContext.__init__ is (u,parent_context,name,node,bases);
    # the old call omitted the required u argument.
    new_cx = ClassContext(self.u,old_cx,name,node,node.bases)
    setattr(node,'new_cx',new_cx)

    # Generate code for the class members.
    self.push_context(new_cx)
    self.visit_list(node.body)
    self.pop_context()

    # Define the name in the old context.
    e = old_cx.st.define_name(name)
    e.node = node
    node.e = e
    e.self_context = new_cx
    old_cx.classes_list.append(new_cx)

    # Bind all unbound matching names in inner contexts.
    self.bind_unbound_name(name,new_cx)

#@+node:ekr.20150312225028.251: *7* p1.Expr
# Expr(expr value)

def do_Expr(self,node):

    '''Record an expression statement in the current context.'''

    context = self.get_context()
    self.visit_children(node)
    self.stats.n_expressions += 1
    for target in (context.expressions_list,context.statements_list):
        target.append(node)
#@+node:ekr.20150312225028.252: *7* p1.For
def do_For(self,node):

    '''Record a for statement as both a statement and an assignment
    (the loop target is assigned on every iteration).'''

    context = self.get_context()
    self.stats.n_fors += 1
    self.visit_children(node)
    context.statements_list.append(node)
    context.assignments_list.append(node)
#@+node:ekr.20150312225028.253: *7* p1.FunctionDef
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,node):

    '''Create a context for a def, define its name in the enclosing
    context, and visit its arguments and body in the new context.'''

    # Stats.
    args = node.args.args
    n = len(args) if args else 0
    d = self.stats.formal_args_dict
    d[n] = 1 + d.get(n,0)

    # Switch to the new context.
    old_cx = self.get_context()

    # Define the function/method name in the old context.
    name = node.name
    e = old_cx.st.define_name(name)

    # Create the new context: args are known in the new context.
    # Bug fix: DefContext.__init__ is (u,parent_context,name);
    # the old call omitted the required u argument.
    new_cx = DefContext(self.u,old_cx,name)
    setattr(node,'new_cx',new_cx)
    setattr(node,'e',e)
    new_cx.node = node
    e.self_context = new_cx

    # If this is a method, remember it:
    if old_cx and old_cx.class_context:
        # If this is the ctor, remember it.
        if name == '__init__':
            old_cx.class_context.ctor = new_cx
        # Add the method to the ivars dict.
        d = old_cx.class_context.ivars_dict
        if name in d:
            # Not quite a correct error, but something unusual is happening.
            self.error('%20s method hides ivar' % name)
        else:
            aList = d.get(name,[])
            aList.append(node)
            d [name] = aList

    # Define the function arguments before visiting the body.
    # These arguments, including 'self', are known in the body.
    self.push_context(new_cx)
    self.def_args_helper(new_cx,e,node.args)
    self.pop_context()

    new_cx.args = node.args # was set by def_args_helper.
    old_cx.defs_list.append(new_cx)

    # Evaluate the body in the new context.
    self.push_context(new_cx)
    self.visit_list(node.body)
    new_cx.node = e.node = node
    self.pop_context()

    # Bind all unbound matching names in inner contexts.
    self.bind_unbound_name(name,new_cx)
#@+node:ekr.20150312225028.254: *7* p1.Global
def do_Global(self,node):

    '''Enter the names in a 'global' statement into the *module* symbol table.'''

    cx = self.get_context()
    cx.statements_list.append(node)
    self.stats.n_globals += 1

    for name in node.names:
        
        # Create a symbol table entry for the name in the *module* context.
        module_e = cx.module_context.st.add_name(name)
        
        # This does *not* define the symbol!
        module_e.defined = False
        
        # Both Python 2 and 3 generate SyntaxWarnings when a name
        # is used before the corresponding global declarations.
        # We can make the same assumption here:
        # give an *error* if an STE appears in this context for the name.
        # The error indicates that scope resolution will give the wrong result.
        e = cx.st.d.get(name)
        if e:
            self.u.error('name \'%s\' used prior to global declaration' % (name))
            # Add the name to the global_names set in *this* context.
            # cx.global_names.add(name)
            
        # Regardless of error, bind the name in *this* context,
        # using the STE from the module context.
        cx.st.d[name] = module_e
#@+node:ekr.20150312225028.255: *7* p1.Import
@ From Guido:

import x            -->  x = __import__('x')
import x as y       -->  y = __import__('x')
import x.y.z        -->  x = __import__('x.y.z')
import x.y.z as p   -->  p = __import__('x.y.z').y.z
@c

def do_Import(self,node):

    '''Add the imported file to u.files_list if needed
    and create a context for the file.'''

    trace = False
    cx = self.get_context()
    cx.statements_list.append(node)
    e_list,names = [],[]
    for fn,asname in self.get_import_names(node):
        fn2 = self.resolve_import_name(fn)
        # Important: do *not* analyze modules not in the files list.
        if fn2:
            mname = self.u.module_name(fn2)
            if g.shortFileName(fn2) in self.u.files_list: 
                if mname not in self.u.module_names:
                    self.u.module_names.append(mname)
            # if trace: g.trace('%s as %s' % (mname,asname))
            def_name = asname or mname
            names.append(def_name)
            e = cx.st.define_name(def_name) # sets e.defined.
            cx.imported_symbols_list.append(def_name)
            if trace: g.trace('define: (Import) %10s in %s' % (def_name,cx))
            e_list.append(e)

            # Add the constant type to the list of types for the *variable*.
            # Reuse a cached context for fn2 if one exists.
            mod_cx = self.u.modules_dict.get(fn2) or LibraryModuleContext(self.u,fn2)
            e.types_cache[''] = mod_cx.module_type
            self.u.stats.n_imports += 1
        else:
            if trace: g.trace('can not resolve %s in %s' % (fn,cx))

    # Record the Import node as both a definition and a reference
    # of every name it binds.
    for e in e_list:
        e.defs_list.append(node)
        e.refs_list.append(node)
#@+node:ekr.20150312225028.256: *7* p1.ImportFrom
@ From Guido:
    
from p.q import x       -->  x = __import__('p.q', fromlist=['x']).x
from p.q import x as y  -->  y = __import__('p.q', fromlist=['x']).x
from ..x.y import z     -->  z = __import('x.y', level=2, fromlist=['z']).z

All these equivalences are still somewhat approximate; __import__
isn't looked up the way other variables are looked up (it is taken
from the current builtins), and if the getattr operation in the "from"
versions raises AttributeError that is translated into ImportError.

There's also a subtlety where "import x.y" implies that y must be a
submodule/subpackage of x, whereas in "from x import y" it may be
either a submodule/subpackage or a plain attribute (e.g. a class,
function or some other variable).
@c

def do_ImportFrom(self,node):

    '''Add the imported file to u.files_list if needed
    and add the imported symbols to the *present* context.'''

    trace = False ; dump = False
    if trace and dump:
        self.u.dump_ast(node)
        
    u = self.u
    cx = self.get_context()
    cx.statements_list.append(node)
    m = self.resolve_import_name(node.module)
    
    if m and m not in self.u.files_list:
        if trace: g.trace('adding module',m)
        self.u.files_list.append(m)

    e_list,names = [],[]
    for fn,asname in self.get_import_names(node):
        fn2 = asname or fn
        if fn2 == '*':
            # 'from x import *' is not handled: nothing is bound.
            if trace: g.trace('From x import * not ready yet')
            return
        names.append(fn2)
        e = cx.st.add_name(fn2)
        cx.imported_symbols_list.append(fn2)
        e_list.append(e)
        if trace: g.trace('define: (ImportFrom) %s' % (fn2))
        # Get the ModuleContext corresponding to fn2.
        mod_cx = self.u.modules_dict.get(fn2)
        ###
        ### if not mod_cx:
        ###    self.u.modules_dict[fn2] = mod_cx = ModuleContext(fn2)
        if mod_cx:
            # module_type is the singleton *constant* type of the module.
            module_type = mod_cx.module_type
                # NOTE(review): module_type is never used below — confirm removable.
            # Add the constant type to the list of types for the *variable*.
            e.defined = True # Indicate there is at least one definition.
            e.types_cache[''] = mod_cx.module_type
            mname = u.module_name(fn2)
            ### if mname not in self.u.module_names:
            ###    self.u.module_names.append(mname)
            u.stats.n_imports += 1

    # Record the node as a definition and a reference of each bound name.
    for e in e_list:
        e.defs_list.append(node)
        e.refs_list.append(node)
#@+node:ekr.20150312225028.257: *7* p1.Interactive
def do_Interactive(self,node):
    '''Reject ast.Interactive nodes: this traverser does not support them.

    Raises AssertionError unconditionally.  An explicit raise is used
    instead of `assert False` so the guard still fires under `python -O`,
    which strips assert statements.
    '''
    raise AssertionError('Interactive context not supported')
#@+node:ekr.20150312225028.258: *7* p1.Lambda
def do_Lambda (self,node):
    '''Create a synthetic name and a new LambdaContext for a lambda,
    then traverse its arguments and body inside that context.'''
    old_cx = self.get_context()

    # Synthesize a lambda name in the old context.
    # This name must not conflict with split names of the form name@n.
    old_cx.n_lambdas += 1
    name = 'Lambda@@%s' % old_cx.n_lambdas
    e = old_cx.st.define_name(name)

    # Define a namespace for the 'lambda' variables.
    new_cx = LambdaContext(self.u,old_cx,name)
    setattr(node,'new_cx',new_cx)
    setattr(node,'e',e) # Bug fix: 2012/12/28.
    new_cx.node = node
    
    self.push_context(new_cx)
    def_e = None
        # No symbol-table entry is passed to the args helper.
    args = self.def_args_helper(new_cx,def_e,node.args)
    body = self.visit(node.body)
        # args and body are unused: the visits matter only for side effects.
    self.pop_context()
#@+node:ekr.20150312225028.259: *7* p1.ListComp
def do_ListComp(self, node):
    '''Count a list comprehension, visit its children, and record it
    as an assignment in the current context.'''
    self.stats.n_list_comps += 1
    self.visit_children(node)
    context = self.get_context()
    context.assignments_list.append(node)
#@+node:ekr.20150312225028.260: *7* p1.Module
def do_Module (self,node):
    '''Create or reuse the ModuleContext for self.fn, traverse the module
    body in that context, then bind unbound matching names in inner contexts.'''

    # Get the module context from the global dict if possible.
    
    # Bug fix: treat all <string> files as separate modules.
    new_cx = None if self.fn == '<string>' else self.u.modules_dict.get(self.fn)

    if not new_cx:
        new_cx = ModuleContext(self.u,self.fn,node)
        self.u.modules_dict[self.fn] = new_cx
        
    new_cx.node = node

    self.push_context(new_cx)
    self.visit_list(node.body)
    self.pop_context()
    
    # Bind all unbound matching names in inner contexts.
    for name in sorted(new_cx.st.d.keys()):
        self.bind_unbound_name(name,new_cx)
#@+node:ekr.20150312225028.261: *7* p1.Name
def do_Name(self,node):
    '''Create a symbol-table entry for the name and record this node as a
    definition and/or reference, depending on the expression context.'''

    trace = False
    cx  = self.get_context()
    ctx = self.kind(node.ctx)
    name = node.id
    
    # Create the symbol table entry, even for builtins.
    e = cx.st.add_name(name)
    setattr(node,'e',e)
    setattr(node,'cx',cx)
    
    def_flag,ref_flag=False,False
    
    if ctx in ('AugLoad','AugStore','Load'):
        # Note: AugStore does *not* define the symbol.
        e.referenced = ref_flag = True
        self.stats.n_load_names += 1
    elif ctx == 'Store':
        # if name not in cx.global_names:
        e.defined = def_flag = True
        if trace: g.trace('Store: %s in %s' % (name,cx))
        self.stats.n_store_names += 1
    elif ctx == 'Param':
        if trace: g.trace('Param: %s in %s' % (name,cx))
        e.defined = def_flag = True
        self.stats.n_param_refs += 1
    else:
        assert ctx == 'Del',ctx
        e.referenced = ref_flag = True
        self.stats.n_del_names += 1

    # Skip tracking of module names and (in Python 2) builtin names.
    if isPython3:
        if name in self.u.module_names:
            return None
    else:
        if name in dir(__builtin__) or name in self.u.module_names:
            return None

    # NOTE(review): inside an attribute chain nothing is recorded here —
    # presumably the attribute handler records the chain; confirm.
    if not self.in_attr:
        if def_flag: e.defs_list.append(node)
        if ref_flag: e.refs_list.append(node)
#@+node:ekr.20150312225028.262: *7* p1.Return
def do_Return(self, node):
    '''Count a Return statement, visit its value if any, and record the
    node in the current context's returns and statements lists.'''
    self.stats.n_returns += 1
    context = self.get_context()
    value = getattr(node, 'value')
    if value:
        self.visit(value)
    context.returns_list.append(node)
    context.statements_list.append(node)
#@+node:ekr.20150312225028.263: *8* p1.Operators...
# Arithmetic/bitwise operators:
# Add | BitAnd | BitOr | BitXor | Div | FloorDiv |
# LShift | Mod | Mult | Pow | RShift | Sub
# Each handler simply tags the node with the operator's source text.

def do_Add(self, node):      node.op_name = '+'
def do_BitAnd(self, node):   node.op_name = '&'
def do_BitOr(self, node):    node.op_name = '|'
def do_BitXor(self, node):   node.op_name = '^'
def do_Div(self, node):      node.op_name = '/'
def do_FloorDiv(self, node): node.op_name = '//'
def do_LShift(self, node):   node.op_name = '<<'
def do_Mod(self, node):      node.op_name = '%'
def do_Mult(self, node):     node.op_name = '*'
def do_Pow(self, node):      node.op_name = '**'
def do_RShift(self, node):   node.op_name = '>>'
def do_Sub(self, node):      node.op_name = '-'

# Boolean operators: And | Or
def do_And(self, node):      node.op_name = ' and '
def do_Or(self, node):       node.op_name = ' or '

# Comparison operators:
# Eq | Gt | GtE | In | Is | IsNot | Lt | LtE | NotEq | NotIn
def do_Eq(self, node):       node.op_name = '=='
def do_Gt(self, node):       node.op_name = '>'
def do_GtE(self, node):      node.op_name = '>='
def do_In(self, node):       node.op_name = ' in '
def do_Is(self, node):       node.op_name = ' is '
def do_IsNot(self, node):    node.op_name = ' is not '
def do_Lt(self, node):       node.op_name = '<'
def do_LtE(self, node):      node.op_name = '<='
def do_NotEq(self, node):    node.op_name = '!='
def do_NotIn(self, node):    node.op_name = ' not in '

# Unary operators: Invert | Not | UAdd | USub
def do_Invert(self, node):   node.op_name = '~'
def do_Not(self, node):      node.op_name = ' not '
def do_UAdd(self, node):     node.op_name = '+'
def do_USub(self, node):     node.op_name = '-'
#@+node:ekr.20150312225028.264: *7* p1.With
def do_With(self, node):
    '''Count a 'with' statement, visit its children, and record it in the
    current context's statements list.'''
    context = self.get_context()
    self.stats.n_withs += 1
    self.visit_children(node)
    context.statements_list.append(node)
#@+node:ekr.20150312225028.265: *5* class Resolver (keep for now)
class Resolver:
    
    '''A class controlling the resolution pattern matchers.

    The real body is spliced in by Leo's @others directive: the
    following top-level-looking defs are this class's methods.
    '''

    @others
#@+node:ekr.20150312225028.266: *6*  r.ctor & helper
def __init__(self):
    '''Ctor for the Resolver class: create all per-run data structures.'''
    self.app = app
    self.format = app.format
    self.sd = app.sd

    # g.trace('(Resolver)',g.callers())
    
    # Singleton type objects.
    # self.num_type = Num_Type()
    self.string_type = String_Type()

    # Data created in Pass 1...
    self.constants_list = []
        # List of all constant ops.
        
    # Data created just after scope resolution...
    self.self_list = []
        # List of all instances of self within methods.
    
    # The main lists for the main algorithm.
    self.known_symbols_list = []
        # The list of symbols whose types are definitely known.
        # The main algorithm pops symbols off this list.
    self.mushy_ops_list = []
        # Ops with mushy type sets. Debugging only?
    self.mushy_ste_list = []
        # The lists of symbols that would have mushy type sets.
        # The hard part of resolution deals with such symbols.

    self.calls_d = {}
        # Keys are contexts, values are list of calls in the context.
    self.defs_d = {} # The global defs dict.
        # Keys are names; values are sets of Contexts
    self.refs_d = {} # The global refs dict.
        # The global dictionary.
        # Keys are names.  Values are sets of contexts.

    # Class info dicts: keys and values are contexts.
    self.class_supers_d = {} # All superclasses.
    self.class_sub_d = {}  # All subclasses.
    self.class_relatives_d = {}
        # All super and subclasses, as well as other related classes.
        
    # Create the dispatch dict.
    self.dispatch_dict = self.make_dispatch_dict()
#@+node:ekr.20150312225028.267: *6*  r.generators
#@+node:ekr.20150312225028.268: *7* r.classes
def classes(self):
    '''Yield every class context in every module.'''
    for context in self.contexts():
        if context.kind == 'class':
            yield context
#@+node:ekr.20150312225028.269: *7* r.contexts
def contexts(self):
    '''Yield every context of every module.'''
    for module in self.modules():
        for context in module.contexts():
            yield context
#@+node:ekr.20150312225028.270: *7* r.modules
def modules(self):
    '''Yield the modules of self.sd.modules_dict in sorted filename order.'''
    modules_dict = self.sd.modules_dict
    for key in sorted(modules_dict):
        yield modules_dict.get(key)
#@+node:ekr.20150312225028.271: *7* r.statements
def statements(self):
    '''Yield every statement of every module, in module order.'''
    for module in self.modules():
        for statement in module.statements():
            yield statement
#@+node:ekr.20150312225028.272: *7* r.unresolved_names (TEST)
def unresolved_names(self):
    '''Yield every symbol-table entry that is not yet resolved.'''
    for context in self.contexts():
        for entry in context.st.d.values():
            if not entry.resolved:
                yield entry
#@+node:ekr.20150312225028.273: *6* r.resolve & initers
def resolve (self):
    '''Top-level driver for type resolution.

    Builds the global dicts, seeds the known-symbols list with 'self',
    module and class names, runs the main algorithm, then performs the
    alias/class/ivar follow-up passes.
    '''
    trace_time = False
    r = self
    
    if trace_time: t1 = time.time()
    
    # Init & do scope resolution.
    r.make_global_dicts()
    
    if trace_time:
        t2 = time.time()
        g.trace('make dicts & resolve scopes: %2.2f sec' % (t2-t1))
    
    # Add 'self', module names and class names to list of known symbols.
    r.init_self()
    
    if trace_time:
        t3 = time.time()
        g.trace('init_self: %2.2f sec' % (t3-t2))
        
    r.init_module_names()
    
    if trace_time:
        t4 = time.time()
        g.trace('init_module_names: %2.2f sec' % (t4-t3))
        
    r.init_class_names()
    
    if trace_time:
        t5 = time.time()
        g.trace('init_class_names: %2.2f sec' % (t5-t4))
    
    r.known_symbols_list.extend(r.self_list)
    r.known_symbols_list.extend(r.constants_list)
    
    # Run the main algorithm.
    r.main_algorithm()
    
    r.resolve_aliases()
    r.resolve_class_relationships()
    r.analyze_classes()
    r.resolve_ivars()
    
    # The table of type-resolution methods.
    # NOTE(review): the table is empty, so the peephole loop below makes
    # no progress and exits after a single pass.
    table = (
    )

    # Do the main, iterative, peepholes.
    progress = True
    while progress:
        progress = False
        for f in table:
            progress = progress or f()
            
    # Do the final peepholes.
    
    if trace_time:
        t6 = time.time()
        g.trace('main algorithm: %2.2f sec' % (t6-t5))
#@+node:ekr.20150312225028.274: *7* r.init_class_names
def init_class_names(self):
    
    '''Mark all references to class names as known.'''
    
    trace = False
    r = self
    format = self.format
    
    # Step 1: Create a dict whose keys are class names and whose values are lists of STE's.
    # Using a dict instead of a list speeds up the code by a factor of more than 300.
    # For all the files of Leo: 30 sec. for the old way and 0.08 sec. the new way.
    e_dict = {}
    for cx in r.classes():
        cx.class_type = Class_Type(cx) # Do this after scope resolution.
        parent = cx.parent_context
        if parent:
            d = parent.st.d
            e = d.get(cx.name)
            if e:
                key = e.name
                aList = e_dict.get(key,[])
                if e not in aList:
                    aList.append(e)
                        # Use a list to disambiguate classes with the same name.
                    e_dict[key] = aList

    # Step 2: Mark all Name ops referring to class names as knowns.
    for cx in r.contexts():
        d = cx.st.d
        # NOTE(review): the loop variable e is rebound inside the
        # 'Name' and 'Import' branches below.
        for e in d.values():
            for op in e.refs_list:
                kind = op.__class__.__name__
                if kind == 'Builtin':
                    pass ### Not ready yet.
                elif kind == 'Name':
                    e = op.e
                    if e.name in e_dict:
                        aList = e_dict.get(e.name)
                        assert aList
                        if e in aList:
                            if trace: g.trace('known Name',e,op,op._parent,cx)
                            r.known_symbols_list.append(op)
                elif kind == 'Import':
                    aList = op.e_list
                    for e in aList:
                        if e.name in e_dict:
                            aList = e_dict.get(e.name)
                            assert aList
                            if e in aList:
                                if trace: g.trace('known Import',e,op,op._parent,cx)
                                r.known_symbols_list.append(op)
                elif kind == 'ImportFrom':
                    if trace: g.trace('ImportFrom not ready yet: %s' % (
                        format(op)))
                else:
                    assert False,'Unexpected Op: %s' % kind
#@+node:ekr.20150312225028.275: *7* r.init_module_names
def init_module_names(self):
    
    '''Intended to mark references to module names as known.

    NOTE(review): Step 1 below only traces; nothing is ever added to
    e_dict, so Step 2 can never find a match.  The original population
    code is commented out.
    '''
    trace = False
    r,sd = self,self.sd
    format = self.format
    
    # Step 1: Create a dict whose keys are module names and whose values are lists of STE's.
    e_dict = {}
    for fn in sd.modules_dict:
        m = sd.modules_dict.get(fn)
        if trace: g.trace(m)
    # module_names = self.u.module_names
    # e_dict = {}
    # for cx in r.classes():
        # d = cx.st.d
        # e = d.get(cx.name)
        # if e:
            # key = e.name
            # aList = e_dict.get(key,[])
            # if e not in aList:
                # aList.append(e)
                    # # Use a list to disambiguate classes with the same name.
                # e_dict[key] = aList
                
    if trace: g.trace(e_dict)

    # Step 2: Mark all Name ops referring to class names as knowns.
    for cx in r.contexts():
        d = cx.st.d
        for e in d.values():
            for op in e.refs_list:
                kind = op.__class__.__name__
                if kind in ('Builtin','Name'):
                    e = op.e
                    if e and e.name in e_dict:
                        aList = e_dict.get(e.name)
                        assert aList
                        if e in aList:
                            if trace: g.trace('known',e,op,op._parent,cx)
                elif kind == 'Import':
                    aList = op.e_list
                    for e in aList:
                        if e.name in e_dict:
                            aList2 = e_dict.get(e.name)
                            assert aList2
                            if e in aList2:
                                if trace: g.trace('known',e,op,op._parent,cx)
                elif kind == 'ImportFrom':
                    if trace: g.trace('ImportFrom not ready yet: %s' % (
                        format(op)))
                else:
                    assert False,'Unexpected Op: %s' % kind
#@+node:ekr.20150312225028.276: *7* r.init_self
def init_self (self):
    
    '''Add all references to "self" to r.self_list, for every method
    whose symbol table defines 'self' exactly once.'''

    r = self
    for class_ in r.classes():
        for def_ in class_.defs():
            e = def_.st.d.get('self')
            if e:
                if len(e.defs_list) > 1:
                    # Multiple definitions make the binding ambiguous: skip.
                    g.trace('*** redefining self',e.defs_list)
                else:
                    r.self_list.extend(e.refs_list)
#@+node:ekr.20150312225028.277: *7* r.main_algorithm
def main_algorithm(self):
    '''Drain the known-symbols worklist, propagating each symbol's type
    via make_known (which may push new symbols onto the list).'''
    while self.known_symbols_list:
        self.make_known(self.known_symbols_list.pop())
#@+node:ekr.20150312225028.278: *7* r.make_global_dicts
def make_global_dicts (self):
    
    contexts = 0
    r = self
    r.refs_dict = {}
    for m in r.modules():
        for cx in m.contexts():
            contexts += 1
            d = cx.st.d # Keys are names, values are STEs.
            for e in d.values():
                aSet = r.refs_dict.get(e.name,set())
                if cx not in aSet:
                    aSet.add(cx)
                    r.refs_dict[e.name] = aSet
                    
    # r.defs_dict contains entries only for defined names.
    r.defs_dict = {}
    for name in r.refs_dict.keys():
        aSet = r.refs_dict.get(name)
        defs = [cx for cx in aSet if cx.st.d.get(name).defined]
        r.defs_dict[name] = defs

    # g.trace('contexts: %s' % (contexts))
#@+node:ekr.20150312225028.279: *7* r.make_known & op handlers
def make_known(self,op):
    
    '''This is called from the main_algorithm.
    op is an Op representing a name or constant with a single, known type.

    NOTE(review): the unconditional 'return' below disables the dispatch
    on op's parent; only the (disabled) trace above it ever runs.
    '''

    r = self
    # g.trace('known: %s parent: %s' % (op,op._parent))
    
    if 0:
        g.trace('%10s %9s %-8s %8s %s' % (
            op,id(op),
            op.__class__.__name__,
            op._parent and op._parent.__class__.__name__,op._parent))

    return ###
    # if op._parent:
        # f = r.dispatch_dict.get(op.parent.kind)
        # if f:
            # f(op.parent)
        # else:
            # g.trace('bad op.parent.kind: %s' % op.parent.kind)
            # g.trace(op)
            # assert False
#@+node:ekr.20150312225028.280: *8* Do-nothings (not used at present)
# Disabled: Leo's @others directive would splice the do-nothing
# handlers below into this spot; 'if 0' keeps them inactive.
if 0:
    @others
#@+node:ekr.20150312225028.281: *9* r.Arg
def do_Arg (self,op):
    '''Arg handler: intentionally does nothing.'''
#@+node:ekr.20150312225028.282: *9* r.Arguments
def do_Arguments (self,op):
    '''Arguments handler: intentionally does nothing.'''
#@+node:ekr.20150312225028.283: *9* r.AugAssign
def do_AugAssign(self,op):
    '''AugAssign handler: an augmented assignment defines no new value,
    so there is nothing to do.'''
#@+node:ekr.20150312225028.284: *9* r.Keyword
def do_Keyword(self,op):
    '''Keyword handler: intentionally does nothing.'''
#@+node:ekr.20150312225028.285: *8* Known types
#@+node:ekr.20150312225028.286: *9* r.Bytes
def do_Bytes(self,op):
    '''Bytes constant handler: read the value; no inference yet.'''
    _ = op.value
#@+node:ekr.20150312225028.287: *9* r.Dict
def do_Dict(self,op):
    '''Dict literal handler: read keys and values; no inference yet.'''
    _ = op.keys
    _ = op.values
#@+node:ekr.20150312225028.288: *9* r.List
def do_List(self,op):
    '''List literal handler: read the elements; no inference yet.'''
    _ = op.elements
#@+node:ekr.20150312225028.289: *9* r.Num
def do_Num(self,op):
    '''Number constant handler: read n; no inference yet.'''
    _ = op.n
#@+node:ekr.20150312225028.290: *9* r.Str
def do_Str(self,op):
    '''String constant handler: read s; no inference yet.'''
    _ = op.s
#@+node:ekr.20150312225028.291: *9* r.Tuple
def do_Tuple (self,op):
    '''Tuple literal handler: read the elements; no inference yet.'''
    _ = op.elements
#@+node:ekr.20150312225028.292: *8* Names & Builtins
#@+node:ekr.20150312225028.293: *9* r.Builtin
def do_Builtin(self,op):
    '''Builtin handler: read the name; no inference yet.'''
    _ = op.name
#@+node:ekr.20150312225028.294: *9* r.Name
def do_Name(self,op):
    '''Name handler: read the name; no inference yet.'''
    _ = op.name
#@+node:ekr.20150312225028.295: *8* Not ready yet
#@+node:ekr.20150312225028.296: *9* r.Comprehension
def do_Comprehension(self,op):
    '''Comprehension handler: read the fields; no inference yet.'''
    _ = op.name
    _ = op.it
    _ = op.ifs
#@+node:ekr.20150312225028.297: *9* r.GenExp
def do_GenExp (self,op):
    '''Generator-expression handler: read the fields; no inference yet.'''
    _ = op.elt
    _ = op.generators
#@+node:ekr.20150312225028.298: *9* r.Index
def do_Index(self,op):
    '''Index handler: read the index; no inference yet.'''
    _ = op.index
#@+node:ekr.20150312225028.299: *9* r.ListComp
def do_ListComp(self,op):
    '''List-comprehension handler: read the fields; no inference yet.'''
    _ = op.elt
    _ = op.generators
#@+node:ekr.20150312225028.300: *9* r.Slice
def do_Slice(self,op):
    '''Slice handler: read upper/lower/step; no inference yet.'''
    _ = op.upper
    _ = op.lower
    _ = op.step
#@+node:ekr.20150312225028.301: *9* r.Subscript
def do_Subscript(self,op):
    '''Subscript handler: read value and slice; no inference yet.'''
    _ = op.value
    _ = op.slice_
#@+node:ekr.20150312225028.302: *8* Operators
#@+node:ekr.20150312225028.303: *9* r.Attribute
def do_Attribute (self,op):
    '''Attribute handler: read value and attr; no inference yet.'''
    _ = op.value
    _ = op.attr
#@+node:ekr.20150312225028.304: *9* r.BinOp
def do_BinOp(self,op):
    '''Propagate types through a binary operator once both operands are known.

    Decrements op.n_unknowns; when it reaches zero, combines the operand
    type lists into op.typ and either marks op known or records it as mushy.
    '''
    trace = True
    r = self
    name = op.op_name
    lt   = op.lt
    rt   = op.rt
    assert lt.parent == op
    assert rt.parent == op
    assert op.n_unknowns > 0
    op.n_unknowns -= 1
    if op.n_unknowns > 0: return
    
    ### Testing only.
    if not lt.typ or not rt.typ:
        # if trace: g.trace('missing typ: %s' % op)
        return ###

    assert lt.typ,op
    assert rt.typ,op
    
    # NOTE(review): the second test is probably meant to be
    # len(rt.typ) == 1; as written any non-empty rt.typ passes.
    if len(lt.typ) == 1 and len(rt.typ):
        lt_type,rt_type = lt.typ[0],rt.typ[0]
        if lt_type == rt_type:
            op.typ = [lt_type]
        # elif lt_type == r.string_type and rt_type == r.num_type:
            # op.typ = [r.string_type]
        else:
            #### Unusual case.
            op.typ.extend(lt.typ)
            op.typ.extend(rt.typ)
    else:
        # Mushy cases.
        op.typ.extend(lt.typ)
        op.typ.extend(rt.typ)
        op.typ = list(set(op.typ))

    if trace and len(op.typ) > 1:
        g.trace('ambiguous: %s%s%s %s' % (lt,name,rt,op.typ))
    
    assert op.typ,'empty op.typ'
    
    if len(op.typ) == 1:
        r.make_known(op)
    else:
        # if trace:
            # g.trace('lt',lt.typ)
            # g.trace('rt',rt.typ)
        r.mushy_ops_list.append(op)
#@+node:ekr.20150312225028.305: *9* r.BoolOp
def do_BoolOp(self,op):
    '''BoolOp handler: read the operator name and operands; no inference yet.'''
    _ = op.op_name
    _ = op.values
#@+node:ekr.20150312225028.306: *9* r.Call
def do_Call (self,op):
    '''Call handler: intentionally a no-op.

    Nothing useful can be inferred without knowing the type of the
    callee's return value.  (The op carries args, func, keywords,
    starargs and starstarargs, all currently ignored.)
    '''
#@+node:ekr.20150312225028.307: *9* r.CompareOp
def do_CompareOp(self,op):
    '''CompareOp handler: read the operands; no inference yet.'''
    _ = op.left
    _ = op.ops
    _ = op.comparators
#@+node:ekr.20150312225028.308: *9* r.TernaryOp
def do_TernaryOp(self,op):
    '''TernaryOp handler: read test/body/orelse; no inference yet.'''
    _ = op.test
    _ = op.body
    _ = op.orelse
#@+node:ekr.20150312225028.309: *9* r.UnaryOp
def do_UnaryOp(self,op):
    '''UnaryOp handler: read the name and operand; no inference yet.'''
    _ = op.op_name
    _ = op.operand
#@+node:ekr.20150312225028.310: *8* r.Assign
def do_Assign(self,op):
    '''Propagate types for an assignment whose target is a simple Name.

    Appends value's types to the target entry's type list.  When every
    definition of the entry has been seen and the value has exactly one
    type, all references to the newly-known symbol are pushed onto
    r.known_symbols_list.  Entries that end up with more than one type
    are recorded in r.mushy_ste_list.
    '''
    trace = False
    r = self
    target = op.target
    value  = op.value
    assert target.parent == op
    assert value.parent == op
    assert repr(op) == '%s=%s' % (target,value)

    if target.kind == 'Name':
        e = target.e
        defs = e.defs_list
        e.defs_seen += 1

        # Append the new types to e.typ.
        changed = False
        for z in value.typ:
            if z not in e.typ:
                e.typ.append(z)
                changed = True
        if not changed:
            if trace: g.trace('unchanged: %s %s' % (e,e.typ))
            return
                
        # The symbol's type is unambiguously known if
        # a) all defs have been seen and 
        # b) e.type_list has exactly one symbol.
        # NOTE(review): val is value.typ[0] even when value.typ has
        # several entries (the 'add' branch below).
        val = value.typ[0]
        if e.defs_seen == len(defs) and len(value.typ) == 1:
            if trace:
                g.trace('known: %s=%s refs: %s' % (
                    target,val,[z.parent for z in e.refs_list]))
            assert target not in r.known_symbols_list
            # Push all the references to the newly-known symbol.
            r.known_symbols_list.extend(e.refs_list)
            # Add the new value to all Ops in e.refs_list.
            for op in e.refs_list:
                op.typ.append(val)
        else:
            if trace: g.trace('add: %s=%s' % (target,value.typ[0]))
            # Add the new value to all Ops in e.refs_list.
            for op in e.refs_list:
                op.typ.append(val)

        if len(e.typ) > 1:
            if trace: g.trace('mushy: %s=%s' % (target,e.typ))
            if e not in r.mushy_ste_list:
                r.mushy_ste_list.append(e)
                    ### This could be expensive.
                    ### It would be better to make this a per-context list.
    else:
        # assert False,'Unexpected target kind: %s' % target.kind
        if trace: g.trace('unexpected target kind: %s %s' % (target.kind,target))
#@+node:ekr.20150312225028.311: *8* r.do_nothing
def do_nothing(self,op):
    '''Default handler: ignore the op entirely.'''
#@+node:ekr.20150312225028.312: *6* The hard part
#@+node:ekr.20150312225028.313: *7* r.analyze_assignments (to do)
def analyze_assignments (self):
    '''Dead code: the body is disabled by 'if 0'.
    It once formatted per-context assignment/return reports for a unit test.'''
    r = self
    if 0: ### Old unit test...
        for m in r.modules():
            result = []
            for cx in m.contexts():
                n = len(cx.parent_contexts())
                pad,pad2 = ' '*n,' '*(n+1)
                result.append('%s%s' % (pad,cx))
                if 0:
                    result2 = []
                    for z in cx.assignments_list:
                        result2.append('%s%s' % (
                            pad2,
                            self.format(z.op).strip()))
                    result.extend(sorted(list(set(result2))))
                if 1:
                    result2 = []
                    for z in cx.returns_list:
                        result2.append('%s%s' % (
                            pad2,
                            self.format(z).strip()))
                    result.extend(sorted(list(set(result2))))
#@+node:ekr.20150312225028.314: *7* r.analyze_calls (to do)
def analyze_calls (self):
    '''Call analysis: not yet implemented.'''
#@+node:ekr.20150312225028.315: *7* r.analyze_returns (To do)
def analyze_returns(self):
    '''Return a report listing each def context followed by its distinct
    assignments, one per indented line.'''
    lines = []
    for module in self.modules():
        for context in module.defs():
            lines.append(context.name)
            for item in sorted(set(context.assignments_list)):
                lines.append(' %s' % (item))
    return '\n'.join(lines)
            
#@+node:ekr.20150312225028.316: *7* r.resolve_class_relationships
def resolve_class_relationships (self):
    '''Reset the class-relationship dicts.  The code that once filled
    them is commented out below, pending a rewrite.'''
    r = self
    
    # Class info dicts: keys and values are contexts.
    r.class_supers_d = {} # All superclasses.
    r.class_sub_d = {}  # All subclasses.
    r.class_relatives_d = {}
        # All super and subclasses, as well as other related classes.

    # excluded_names and r_d are computed but unused: the method
    # returns before the commented-out rewrite below.
    excluded_names = ('object',)
    r_d = r.class_relatives_d
    
    return #### Rewrite

    # for m in r.modules():
        # for cx in m.contexts():
            # if cx.kind == 'class' and cx.name not in excluded_names:
                # hash1 = cx.name
                # aSet = r_d.get(hash1,set())
                # for cx2 in cx.bases:
                    # hash2 = repr(cx2)
                    # aSet2 = r_d.get(hash2,set())
                    # aSet2.add(cx)
                    # aSet.add(cx2)
                    # r_d[hash2] = aSet2
                # r_d[hash1] = aSet
#@+node:ekr.20150312225028.317: *7* r.analyze_classes & helpers
def analyze_classes(self,aList=None):
    '''Return the sorted list of class contexts, optionally restricted to
    the class names given in aList.'''
    if aList:
        # Convert names to class contexts.
        result = sorted(z for z in self.classes() if z.name in aList)
    else:
        result = sorted(self.classes())
    return result
#@+node:ekr.20150312225028.318: *8* r.analyze_class_pair
def analyze_class_pair(self,aLisst,cx1,cx2):
    '''Print the bases of cx2 unless the two class contexts are the same.
    (The aLisst parameter is unused; its name is a historical typo.)'''
    if cx1 == cx2:
        return
    print('  %s: %s' % (cx2.name,cx2.bases))
    
#@+node:ekr.20150312225028.319: *7* r.resolve_ivars
def resolve_ivars (self):
    '''Dead code: the body is disabled by 'if 0'.
    It once scanned assignments to self.<ivar> for constant types.'''
    r = self
    
    if 0: ###
    
        constants = ('Dict','Int','List','Num','Str','Tuple',)

        g.trace()
    
        trace = True
        
        for m in self.modules():
            g.trace(m)
            for class_ in m.classes():
                for def_ in class_.defs():
                    for op in def_.assignments_to('self'):
                        target = op.target
                        value  = op.value
                        kind = op.__class__.__name__
                        if trace:
                            g.trace(op)
                            g.trace(self.format(op))
                            
                        val = PatternFormatter().visit(value) ### g_pattern_formatter.visit(value)
                        if val in constants:
                            if trace: g.trace('found constant type: %s' % val)
                            if kind == 'Assign':
                                print('%s=%s\n' % (target,val))
                                    # To do: add constant type for the ste for target.
                            else:
                                assert kind=='AugAssign',kind
                                print('%s=%s\n' % (target,val))
#@+node:ekr.20150312225028.320: *7* r.resolve_aliases (will be removed)
def resolve_aliases (self):
    '''Alias resolution: an empty stub scheduled for removal.'''
#@+node:ekr.20150312225028.321: *5* class TypeInferrer (AstFullTraverser)
class TypeInferrer (AstFullTraverser):
    
    '''A class to infer the types of objects.

    The remaining methods are spliced in by Leo's @others directive.
    '''
    
    def __init__ (self):
        # Delegate all initialization to the base traverser.
        AstFullTraverser.__init__(self)

    def __call__(self,node):
        # Allow an instance to be called like a function: ti(node) == ti.run(node).
        return self.run(node)
    
    @others
#@+node:ekr.20150312225028.322: *6* ti.clean (*revise*)
def clean (self,aList):
    
    '''Return sorted(aList) with all duplicates removed.

    NOTE(review): the 'if 1' below short-circuits the method: it returns
    aList (or []) unchanged, and the de-duplication and failure-stripping
    code in the else branch is dead.
    '''
    
    if 1:
        return aList or []
    else:
        ti = self
        if 1:
            # Good for debugging and traces.
            result = []
            for z in aList:
                if z not in result:
                    result.append(z)
            
            # An excellent check.
            assert len(result) == len(list(set(aList))),aList
        else:
            result = list(set(aList))

        # Strip out inference errors if there are real results.
        result2 = ti.ignore_failures(result)
        if result2:
            ti.stats.n_clean_success += 1
            return sorted(result2)
        else:
            ti.stats.n_clean_fail += 1
            return sorted(result)
#@+node:ekr.20150312225028.323: *6* ti.has_children
def has_children(self, node):
    '''Return True if the AST node has at least one child node.'''
    node_kind = node.__class__.__name__
    assert isinstance(node, ast.AST), node_kind
    for _child in self.get_child_nodes(node):
        return True
    return False
#@+node:ekr.20150312225028.324: *6* ti.format
def format(self, node):
    '''Return the first line of the formatted node, indented by its level.'''
    u = self.u
    indent = ' ' * u.compute_node_level(node)
    line = u.first_line(u.format(node))
    return '%s%s' % (indent, line)
#@+node:ekr.20150312225028.325: *6* ti.init
def init(self):
    '''Create the stats/utils helpers, counters, inference stacks,
    singleton type objects and the builtin-type dict.'''

    self.stats = Stats()
    self.u = Utils()
    
    # Local stats
    self.n_nodes = 0
    
    # Detecting circular inferences
    self.call_stack = [] # Detects recursive calls
    self.assign_stack = [] # Detects circular assignments.

    # Create singleton instances of simple types.
    self.bool_type = Bool_Type()
    self.builtin_type = Builtin_Type()
    self.bytes_type = Bytes_Type()
    self.float_type = Float_Type()
    self.int_type = Int_Type()
    self.none_type = None_Type()
    self.string_type = String_Type()

    # Create the builtin type dict.
    self.builtin_type_dict = {
        'eval': [self.none_type],
        'id':   [self.int_type],
        'len':  [self.int_type],
        'ord':  [self.int_type],
        # list,tuple...
        # close,open,sort,sorted,super...
    }
#@+node:ekr.20150312225028.326: *6* ti.run (entry point)
def run(self, root):
    '''Entry point: initialize the inferrer, then traverse the tree.'''
    self.init()
    self.visit(root)
#@+node:ekr.20150312225028.327: *6* ti.type helpers
def has_failed(self,t1,t2=[],t3=[]):
    '''True if any entry in the concatenated lists is an Inference_Failure.'''
    # The shared mutable defaults are never mutated in these helpers,
    # so the usual default-argument pitfall is benign here.
    return any([isinstance(z,Inference_Failure) for z in t1+t2+t3])
    
def is_circular(self,t1,t2=[],t3=[]):
    '''True if any entry is a Circular_Assignment failure.'''
    return any([isinstance(z,Circular_Assignment) for z in t1+t2+t3])
    
def is_recursive(self,t1,t2=[],t3=[]):
    '''True if any entry is a Recursive_Inference failure.'''
    return any([isinstance(z,Recursive_Inference) for z in t1+t2+t3])
    
def ignore_failures(self,t1,t2=[],t3=[]):
    '''Return the entries that are not Inference_Failure instances.'''
    return [z for z in t1+t2+t3 if not isinstance(z,Inference_Failure)]
    
def ignore_unknowns(self,t1,t2=[],t3=[]):
    '''Return the entries that are not Unknown_Type or Unknown_Arg_Type.'''
    return [z for z in t1+t2+t3 if not isinstance(z,(Unknown_Type,Unknown_Arg_Type))]
    
def merge_failures(self,t1,t2=[],t3=[]):
    '''Return the failures only, preferring specific ones over Unknown_Type.'''
    aList = [z for z in t1+t2+t3 if isinstance(z,Inference_Failure)]
    if len(aList) > 1:
        # Prefer the most specific reason for failure.
        aList = [z for z in aList if not isinstance(z,Unknown_Type)]
    return aList
#@+node:ekr.20150312225028.328: *6* ti.visit
def visit(self, node):
    '''Visit a single node.  Callers are responsible for visiting children.

    Dispatches to the do_<ClassName> visitor; raises AttributeError if no
    visitor exists for the node's class.
    '''
    visitor = getattr(self, 'do_' + node.__class__.__name__)
    self.n_nodes += 1
    return visitor(node)
#@+node:ekr.20150312225028.329: *6* ti.visitors
#@+node:ekr.20150312225028.330: *7* ti.expressions
#@+node:ekr.20150312225028.331: *8* ti.Attribute & check_attr (check super classes for attributes)
# Attribute(expr value, identifier attr, expr_context ctx)

def do_Attribute (self,node):
    '''Infer an ast.Attribute node: infer node.value, and when it is a
    single Class_Type, look up node.attr among that class's ivars.
    Returns [Unknown_Type(node)] on any failure.
    '''
    ti = self
    trace = False and not g.app.runningAllUnitTests
    # Bug fix: these trace sub-switches (and the helper name `u`) were
    # referenced below without ever being defined, so the error branches
    # raised NameError.  Define them all off by default.
    trace_errors = trace_found = trace_fuzzy = False
    # g.trace(ti.format(node),node.value,node.attr)
    t = ti.visit(node.value) or [] ###
    t = ti.clean(t)
    t = ti.merge_failures(t)
    tag = '%s.%s' % (t,node.attr) # node.attr is always a string.
    if t:
        if len(t) == 1:
            ti.stats.n_not_fuzzy += 1
            t1 = t[0]
            if ti.kind(t1) == 'Class_Type':
                aList = t1.cx.ivars_dict.get(node.attr)
                aList = ti.clean(aList) if aList else []
                if aList:
                    # The attr names an ivar: its type is the union of the
                    # types of all assignments reaching that ivar.
                    t = []
                    for node2 in aList:
                        t.extend(ti.visit(node2))
                    t = ti.clean(t)
                    ti.set_cache(node,t,tag='ti.Attribute')
                    ti.stats.n_attr_success += 1
                    if trace and trace_found:
                        g.trace('ivar found: %s -> %s' % (tag,t))
                elif t1.cx.bases:
                    if trace_errors: g.trace('bases',
                        ti.format(node),ti.format(t1.cx.bases))
                    ### Must check super classes.
                    t = [Unknown_Type(node)]
                else:
                    # Was u.error: `u` was never bound in this method.
                    ti.error('%20s has no %s member' % (ti.format(node),t1.cx.name))
                    t = [Unknown_Type(node)]
            else:
                ti.stats.n_attr_fail += 1
                if trace and trace_errors:
                    g.trace('fail',t,ti.format(node))
                t = [Unknown_Type(node)]
        else:
            # Fuzzy: node.value inferred to more than one type.
            ti.stats.n_fuzzy += 1
            if trace and trace_fuzzy: g.trace('fuzzy',t,ti.format(node))
    else:
        if trace and trace_errors: g.trace('fail',t,ti.format(node))
        t = [Unknown_Type(node)]
    # ti.check_attr(node) # Does nothing
    return t
#@+node:ekr.20150312225028.332: *9* ti.check_attr
def check_attr(self,node):
    '''Check that an Attribute node names a real class member.

    Disabled: the bare return below makes this a no-op; the checking is
    now done in ti.do_Attribute.  The commented-out body is kept as an
    archive of the original implementation.
    '''
    ti = self
    trace = False and not g.app.runningAllUnitTests
    
    return ### Now done in ti.Attribute

    # assert ti.kind(node) == 'Attribute'
    # value = node.value
    # # node.attr is always a string.
    
    # if ti.kind(value) == 'Name':
        # # The ssa pass has computed the ivars dict.
        # # There is no need to examine value.ctx.
        # name = value.id
        # name_e = value.e
        # name_cx = name_e.cx
        # name_class_cx = name_cx.class_context
        # if name == 'self':
            # if name_class_cx:
                # if node.attr in name_class_cx.ivars_dict:
                    # if trace: g.trace('OK: %s.%s' % (
                        # name_class_cx.name,node.attr))
                # else:
                    # ti.error('%s has no %s member' % (
                        # name_class_cx.name,node.attr))
            # else:
                # ti.error('%s is not a method of any class' % (
                    # name)) ####
        # else:
            # ### To do: handle any id whose inferred type is a class or instance.
            # if trace:
                # g.trace('** not checked: %s' % (name))
                # g.trace(ti.u.dump_ast(value))
#@+node:ekr.20150312225028.333: *8* ti.BinOp & helper
def do_BinOp (self,node):
    '''Infer a binary operation from the cleaned types of its operands.

    Handles numeric ops, list/string concatenation, string repetition and
    string formatting (%); anything else counts as an inference failure.
    '''
    ti = self
    trace = True and not g.app.runningAllUnitTests
    trace_infer = False ; trace_fail = False
    lt = ti.visit(node.left) or []
    rt = ti.visit(node.right) or []
    lt = ti.clean(lt)
    rt = ti.clean(rt)
    op_kind = ti.kind(node.op)
    num_types = ([ti.float_type],[ti.int_type])
    list_type = [List_Type(None)]
    if rt in num_types and lt in num_types:
        # int op int -> int; anything involving a float -> float.
        if rt == [ti.float_type] or lt == [ti.float_type]:
            t = [ti.float_type]
        else:
            t = [ti.int_type]
    elif rt == list_type and lt == list_type and op_kind == 'Add':
        t = list_type
    elif op_kind == 'Add' and rt == [ti.string_type] and lt == [ti.string_type]:
        t = [ti.string_type]
    elif op_kind == 'Mult' and rt == [ti.string_type] and lt == [ti.string_type]:
        # str * str is a runtime TypeError in the analyzed program.
        g.trace('*** User error: string mult')
        t = [Unknown_Type(node)]
    elif op_kind == 'Mult' and (
        (lt==[ti.string_type] and rt==[ti.int_type]) or
        (lt==[ti.int_type] and rt==[ti.string_type])
    ):
        t = [ti.string_type]
    elif op_kind == 'Mod' and lt == [ti.string_type]:
        t = [ti.string_type] # (string % anything) is a string.
    else:
        ti.stats.n_binop_fail += 1
        if trace and trace_fail:
            if 1:
                s = '%r %s %r' % (lt,op_kind,rt)
                g.trace('  fail: %30s %s' % (s,ti.format(node)))
            else:
                g.trace('  fail:',lt,op_kind,rt,ti.format(node))
        # NOTE(review): Inference_Error is not among the failure classes
        # used elsewhere here (Inference_Failure, Unknown_Type, ...);
        # possibly a typo for Inference_Failure -- confirm before reviving.
        t = [Inference_Error(node)] ### Should merge types!
    if trace and trace_infer: g.trace(ti.format(node),'->',t)
    return t
#@+node:ekr.20150312225028.334: *8* ti.BoolOp
def do_BoolOp(self, node):
    '''A boolean operation always infers to the singleton bool type.'''
    return [self.bool_type]
#@+node:ekr.20150312225028.335: *8* ti.Call & helpers
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)
#   Note: node.starargs and node.kwargs are given only if assigned explicitly.

def do_Call (self,node):
    '''
    Infer the value of a function called with a particular set of arguments.

    Order of attempts: builtin table lookup, class instantiation, then
    full def inference (with a re-run when recursion was detected).
    '''
    ti = self
    trace = False and not g.app.runningAllUnitTests
    trace_builtins = True
    trace_errors = True ; trace_returns = False

    kind = ti.kind(node)  # NOTE(review): computed but never used below.
    func_name = ti.find_function_call(node)
    
    if trace: g.trace('1:entry:',func_name) # ,before='\n',
    
    # Special case builtins.
    t = ti.builtin_type_dict.get(func_name,[])
    if t:
        if trace and trace_builtins: g.trace(func_name,t)
        return t
        
    # Find the def or ctor to be evaluated.
    e = ti.find_call_e(node.func)
    if not (e and e.node):
        # find_call_e has given the warning.
        t = [Unknown_Type(node)]
        s = '%s(**no e**)' % (func_name)
        if trace and trace_errors: g.trace('%17s -> %s' % (s,t))
        return t

    # Special case classes.  More work is needed.
    if ti.kind(e.node) == 'ClassDef':
        # Return a type representing an instance of the class
        # whose ctor is evaluated in the present context.
        args,t = ti.class_instance(e)
        if trace and trace_returns:
            s = '%s(%s)' % (func_name,args)
            g.trace('%17s -> %s' % (s,t))
        return t

    # Infer the specific arguments and gather them in args list.
    # Each element of the args list may have multiple types.
    assert ti.kind(e.node) == 'FunctionDef'
    args = ti.infer_actual_args(e,node)
        
    # Infer the function for the cross-product the args list.
    # In the cross product, each argument has exactly one type.
    ti.stats.n_ti_calls += 1
    recursive_args,t = [],[]
    t2 = ti.infer_def(node,rescan_flag=False) ### specific_args,e,node,)
    if ti.is_recursive(t2):
        recursive_args.append(t2)
    t.extend(t2)

    if True and recursive_args:
        # Re-run inference now that the recursion has bottomed out.
        if trace: g.trace('===== rerunning inference =====',t)
        for t2 in recursive_args:
            t3 = ti.infer_def(node,rescan_flag=True) ### specific_args,e,node,rescan_flag=True)
            t.extend(t3)
        
    if ti.has_failed(t):
        t = ti.merge_failures(t)
        # t = ti.ignore_failures(t)
    else:
        t = ti.clean(t)
    if trace and trace_returns:
        s = '3:return %s(%s)' % (func_name,args)
        g.trace('%17s -> %s' % (s,t))
    return t
#@+node:ekr.20150312225028.336: *9* ti.class_instance
def class_instance (self,e):
    
    '''
    Return a type representing an instance of the class
    whose ctor is evaluated in the present context.

    Returns (args, t): args is a placeholder empty list (ctor argument
    inference is unfinished), t is [Class_Type(cx)] for e's context.
    '''
    
    ti = self
    trace = True and not g.app.runningAllUnitTests
    cx = e.self_context
    
    # Step 1: find the ctor if it exists.
    # NOTE(review): ctor is looked up but not yet used -- see the archived
    # code below for the unfinished direction this was taking.
    d = cx.st.d
    ctor = d.get('__init__')

    # node2 = node.value
    # name = node2.id
    # attr = node.attr
    # e = getattr(node2,'e',None)
    # if trace: g.trace(kind,v_kind,name,attr)
    # # g.trace('e',e)
    # t = ti.get_cache(e)
    # # g.trace('cache',t)
    # if len(t) == 1:
        # t = t[0]
        # e_value = t.node.e
        # # g.trace('* e_value',e_value)
        # # g.trace('e_value.self_context',e_value.self_context)
        # e = e_value.self_context.st.d.get(node.attr)
        # if trace: g.trace('** e_value.self_context.st.d.get(%s)' % (attr),e)
        # # g.trace('e_value.self_context.st.d', e_value.self_context.st.d)
        # # g.trace('e.node',e.node)
        
    args = [] ### To do
    t = [Class_Type(cx)]
    # ti.set_cache(e,t,tag='class name')
    return args,t
#@+node:ekr.20150312225028.337: *9* ti.find_call_e
def find_call_e (self,node):
    '''Return the symbol table entry for the thing being called.

    node is Call.func: an ast.Name (use its .e attribute) or another
    expression whose single inferred type must be a Class_Type.
    Returns None when no entry can be found; callers handle that.
    '''
    ti = self
    trace = False and not g.app.runningAllUnitTests
    trace_errors = False; trace_fuzzy = True ; trace_return = False
    kind = ti.kind(node)
    e = None # Default.
    if kind == 'Name':
        # if trace: g.trace(kind,node.id)
        e = getattr(node,'e',None)
    else:
        t = ti.visit(node) or []
        if len(t) == 1:
            ti.stats.n_not_fuzzy += 1
            t = t[0]
            if ti.kind(t) == 'Class_Type':
                d = t.cx.st.d
                if ti.kind(node) == 'Attribute':
                    name = node.attr
                elif ti.kind(node) == 'Call':
                    name = node.func
                else:
                    name = None
                if name:
                    e = d.get(name)
                else:
                    e = None
            else:
                if trace and trace_errors:
                    g.trace('not a class type: %s %s' % (ti.kind(t),ti.format(node)))
        elif len(t) > 1:
            # Fuzzy: more than one inferred type for the callee.
            if trace and trace_fuzzy: g.trace('fuzzy',t,ti.format(node))
            ti.stats.n_fuzzy += 1
            e = None
        
    # elif kind == 'Attribute':
        # v_kind = ti.kind(node.value)
        # if v_kind == 'Name':
            # node2 = node.value
            # name = node2.id
            # attr = node.attr
            # e = getattr(node2,'e',None)
            # # if trace: g.trace(kind,v_kind,name,attr)
            # t = ti.get_cache(e)
            # if len(t) == 1:
                # t = t[0]
                # if ti.kind(t) == 'Class_Type':
                    # d = t.cx.st.d
                    # e = d.get(node.attr)
                # else:
                    # pass ### To do
            # elif t:
                # pass
            # else:
                # t = [Unknown_Type(node)]
        # elif v_kind == 'Attribute':
            # node2 = node.value
            # g.trace('*****',kind,v_kind,ti.format(node.value))
            # e = ti.find_call_e(node2)
        # else:
            # g.trace('not ready yet',kind,v_kind)
            # e = None
    # elif kind in ('Call','Subscript'):
        # g.trace(kind)
        # e = None
    # else:
        # g.trace('===== oops:',kind)
        # e = None
        
    # if e:
        # assert isinstance(e,SymbolTableEntry),ti.kind(e)
        # ti.stats.n_find_call_e_success += 1
    # else:
        # # Can happen with methods,Lambda.
        # ti.stats.n_find_call_e_fail += 1
        # if trace and trace_errors: g.trace('**** no e!',kind,ti.format(node),
            # before='\n')

    # if e and not e.node:
        # if trace and trace_errors: g.trace(
            # 'undefined e: %s' % (e),before='\n')

    # if trace and trace_return: g.trace(
        # kind,'e:',e,ti.format(node))

    # Bug fix: the result was computed but never returned (the original
    # `return e` above was commented out), so every caller saw None.
    return e
#@+node:ekr.20150312225028.338: *9* ti.infer_actual_args
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)
#   keyword = (identifier arg, expr value) # keyword arguments supplied to call

# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def infer_actual_args (self,e,node):
    
    '''Return a list of types for all actual args, in the order defined in
    by the entire formal argument list.

    Binding order: positional actuals, then keyword actuals (in formal
    order), then defaults / 'self' for any still-unbound formal.
    '''
    
    ti = self
    trace = False and not g.app.runningAllUnitTests
    trace_args = False
    assert ti.kind(node)=='Call'
    cx = e.self_context
    # Formals...
    formals  = cx.node.args or []
    defaults = cx.node.args.defaults or [] # A list of expressions
    vararg   = cx.node.args.vararg
    kwarg    = cx.node.args.kwarg
    # Actuals...
    actuals  = node.args or [] # A list of expressions.
    keywords = node.keywords or [] # A list of (identifier,expression) pairs.
    starargs = node.starargs
    kwargs   = node.kwargs
    assert ti.kind(formals)=='arguments'
    assert ti.kind(formals.args)=='list'
    
    formal_names = [z.id for z in formals.args]
        # The names of *all* the formal arguments, including those with defaults.
        # Does not include vararg and kwarg.
       
    # Append unnamed actual args.
    # These could be either non-keyword arguments or keyword arguments.
    args = [ti.visit(z) for z in actuals]
    bound_names = formal_names[:len(actuals)]
    
    if trace and trace_args:
        g.trace('formal names',formal_names)
        g.trace('   arg names',bound_names)
        g.trace('    starargs',starargs and ti.format(starargs))
        g.trace('    keywargs',kwargs   and ti.format(kwargs))
        # formal_defaults = [ti.visit(z) for z in defaults]
            # # The types of each default.
        # g.trace('formal default types',formal_defaults)
        # g.trace('unnamed actuals',ti.format(actuals))
    
    # Add keyword args in the call, in the order they appear in the formal list.
    # These could be either non-keyword arguments or keyword arguments.
    keywargs_d = {}
    keywords_d = {}
    for keyword in keywords:
        name = keyword.arg
        t = ti.visit(keyword.value)
        value = ti.format(keyword.value)
        keywords_d[name] = (value,t)

    for name in formal_names[len(actuals):]:
        data = keywords_d.get(name)
        if data:
            value,t = data
            if trace and trace_args: g.trace('set keyword',name,value,t)
            args.append(t)
            bound_names.append(name)
        # else: keywargs_d[name] = None ### ???

    # Finally, add any defaults from the formal args.
    # Defaults align with the *last* len(defaults) formals.
    n_plain = len(formal_names) - len(defaults)
    defaults_dict = {}
    for i,expr in enumerate(defaults):
        name = formal_names[n_plain+i]
        value = ti.format(expr)
        t = ti.visit(expr)
        defaults_dict[name] = (value,t)

    for name in formal_names:
        if name not in bound_names:
            data = defaults_dict.get(name)
            t = None # default
            if data:
                value,t = data
                if trace and trace_args: g.trace('set default',name,value,t)
            elif name == 'self':
                # 'self' binds to an instance of the enclosing class.
                def_cx = e.self_context
                class_cx = def_cx and def_cx.class_context
                if class_cx:
                    t = [Class_Type(class_cx)]
            if t is None:
                t = [Unknown_Arg_Type(node)]
                ti.error('Unbound actual argument: %s' % (name))
            args.append(t)
            bound_names.append(name)
            
    ### Why should this be true???
    # assert sorted(formal_names) == sorted(bound_names)

    if None in args:
        g.trace('***** opps node.args: %s, args: %s' % (node.args,args))
        args = [z for z in args if z is not None]
        
    if trace: g.trace('result',args)
    return args
#@+node:ekr.20150312225028.339: *9* ti.infer_def & helpers (sets call cache)
def infer_def(self,node,rescan_flag):
    
    '''Infer everything possible from a def D called with specific args:
    
    1. Bind the specific args to the formal parameters in D.
    2. Infer all assignments in D.
    3. Infer all outer expression in D.
    4. Infer all return statements in D.

    Currently disabled.  Returns an empty type list.
    '''
    
    ti = self
    trace = False and not g.app.runningAllUnitTests
    # Bug fix: return [] rather than None -- callers extend and scan the
    # result (t.extend(t2) and ti.is_recursive(t2) in ti.do_Call).
    return [] ###

    # t0 = ti.get_call_cache(e,hash_) or []
    # if hash_ in ti.call_stack and not rescan_flag:
        # # A recursive call: always add an Recursive_Instance marker.
        # if trace:g.trace('A recursive','rescan',rescan_flag,hash_,'->',t0)
        # ti.stats.n_recursive_calls += 1
        # t = [Recursive_Inference(node)]
    # else:
        # if trace: g.trace('A',hash_,'->',t0)
        # ti.call_stack.append(hash_)
        # try:
            # cx = e.self_context
            # # data = ti.switch_context(e,hash_,node)
            # ti.bind_args(specific_args,cx,e,node)
            # ti.infer_assignments(cx,e)
            # ti.infer_outer_expressions(cx,node)
            # t = ti.infer_return_statements(cx,e)
            # ti.restore_context(data)
        # finally:
            # hash2 = ti.call_stack.pop()
            # assert hash2 == hash_
    # # Merge the result and reset the cache.
    # t.extend(t0)
    # t = ti.clean(t)
    # if trace: g.trace('B',hash_,'->',t)
    # return t
#@+node:ekr.20150312225028.340: *10* ti.bind_args (ti.infer_def helper) (To do: handle self)
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)
#   keyword = (identifier arg, expr value) # keyword arguments supplied to call

# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def bind_args (self,types,cx,e,node):
    '''Bind the inferred types of a call's arguments to the formal
    parameters of the called def.

    Currently a stub: the loop body is disabled (pass), and the method
    silently gives up when the formal/actual counts differ.
    '''
    ti = self
    trace = False and not g.app.runningAllUnitTests
    assert ti.kind(node)=='Call'
    assert isinstance(node.args,list),node
    formals = cx.node.args or []
    assert ti.kind(formals)=='arguments'
    assert ti.kind(formals.args)=='list'
    formal_names = [z.id for z in formals.args]
        # The names of *all* the formal arguments, including those with defaults.
        
    if len(formal_names) != len(types):
        # Mismatched arity: nothing can be bound safely.
        # g.trace('**** oops: formal_names: %s types: %s' % (formal_names,types))
        return

    def_cx = e.self_context
    d = def_cx.st.d
    for i,name in enumerate(formal_names):
        pass ### 
        ### Handle self here.
        # t = types[i]
        # e2 = d.get(name)
        # if e2:
            # if trace: g.trace(e2,t) # g.trace(e2.name,t)
            # ti.set_cache(e2,[t],tag='bindargs:%s'%(name))
        # else:
            # g.trace('**** oops: no e2',name,d)
#@+node:ekr.20150312225028.341: *10* ti.infer_assignments
def infer_assignments(self,cx,e):
    
    '''Infer all the assignments in the function context.

    Currently a stub: the per-assignment inference is disabled (pass);
    the archived caching logic is kept below.
    '''

    ti = self
    trace = False and not g.app.runningAllUnitTests
    for a in cx.assignments_list:
        if ti.kind(a) == 'Assign': # ignore AugAssign.
            pass ####

            # t2 = ti.get_cache(a)
            # if t2:
                # ti.stats.n_assign_hits += 1
                # if trace: g.trace('hit!',t2)
            # else:
                # t2 = ti.visit(a)
                # t3 = ti.ignore_failures(t2)
                # if t3:
                    # ti.stats.n_assign_misses += 1
                    # # g.trace('***** set cache',t2)
                    # ti.set_cache(a,t2,tag='infer_assns')
                    # if trace: g.trace('miss',t2)
                # else:
                    # ti.stats.n_assign_fails += 1
                    # if trace: g.trace('fail',t2)
               
                   
    return None # This value is never used.
#@+node:ekr.20150312225028.342: *10* ti.infer_outer_expressions
def infer_outer_expressions(self, cx, node):
    '''Infer all outer (bare) expressions in the function context.'''
    trace = False and not g.app.runningAllUnitTests
    for expression in cx.expressions_list:
        if trace:
            g.trace(self.format(expression))
        self.stats.n_outer_expr_misses += 1
        self.visit(expression)
    return None # This value is never used.
#@+node:ekr.20150312225028.343: *10* ti.infer_return_statements
def infer_return_statements(self, cx, e):
    '''Infer all return statements in the function context.

    Returns the merged failures if any inference failed, else the
    cleaned union of all return types.
    '''
    trace = False and not g.app.runningAllUnitTests
    types = []
    for statement in cx.returns_list:
        inferred = self.visit(statement)
        if trace:
            g.trace('miss', inferred)
        types.extend(inferred)
    if self.has_failed(types):
        return self.merge_failures(types)
    return self.clean(types)
#@+node:ekr.20150312225028.344: *8* ti.Compare
# Compare(expr left, cmpop* ops, expr* comparators)

def do_Compare(self, node):
    '''A comparison always yields bool; operands are visited for their
    side effects only.'''
    self.visit(node.left)
    for comparator in node.comparators:
        self.visit(comparator)
    return [self.bool_type]
#@+node:ekr.20150312225028.345: *8* ti.comprehension
def do_comprehension(self, node):
    '''Infer one comprehension clause; the result is a list type.'''
    self.visit(node.target)  # The loop variable (a name).
    self.visit(node.iter)    # The iterable being looped over.
    return [List_Type(node)]
#@+node:ekr.20150312225028.346: *8* ti.Expr
# Expr(expr value)

def do_Expr(self, node):
    '''Infer an expression statement from its value expression.'''
    return self.visit(node.value)
#@+node:ekr.20150312225028.347: *8* ti.GeneratorExp
def do_GeneratorExp(self, node):
    '''Infer a generator expression from its generator clauses.'''
    trace = False and not g.app.runningAllUnitTests
    self.visit(node.elt)  # Visited for side effects only.
    t = []
    for generator in node.generators:
        t.extend(self.visit(generator))
    if self.has_failed(t):
        t = self.merge_failures(t)
        if trace:
            g.trace('failed inference', self.format(node), t)
    else:
        t = self.clean(t)
    return t
#@+node:ekr.20150312225028.348: *8* ti.IfExp
# The ternary operator
# IfExp(expr test, expr body, expr orelse)

def do_IfExp(self, node):
    '''Infer a ternary expression: the union of both branch types.'''
    self.visit(node.test)  # Visited for side effects only.
    t = self.visit(node.body)
    t.extend(self.visit(node.orelse))
    if self.has_failed(t):
        return self.merge_failures(t)
    return self.clean(t)
#@+node:ekr.20150312225028.349: *8* ti.Index
def do_Index(self, node):
    '''Infer an Index node from the indexing expression.'''
    return self.visit(node.value)
#@+node:ekr.20150312225028.350: *8* ti.Lambda
def do_Lambda(self, node):
    '''Infer a lambda from its body expression.'''
    return self.visit(node.body)
#@+node:ekr.20150312225028.351: *8* ti.ListComp
def do_ListComp(self, node):
    '''Infer a list comprehension from its generator clauses.'''
    self.visit(node.elt)  # Visited for side effects only.
    t = []
    for generator in node.generators:
        t.extend(self.visit(generator))
    if self.has_failed(t):
        return self.merge_failures(t)
    return self.clean(t)
#@+node:ekr.20150312225028.352: *8* ti.Name (**rewrite)
def do_Name(self,node):
    '''Infer an ast.Name: 'self' infers to the enclosing Class_Type;
    other names infer to the union of types of their reaching
    assignments (the ssa 'reach' set).
    '''
    ti = self ; u = ti.u
    trace = True and not g.app.runningAllUnitTests
    trace_infer = False ; trace_fail = False
    trace_self = False
    ctx_kind = ti.kind(node.ctx)
    name = node.id
    trace = trace and name == 'i'
        # NOTE(review): debug leftover -- restricts all tracing to the
        # single name 'i'.
    
    # # Reaching sets are useful only for Load attributes.
    # if ctx_kind not in ('Load','Param'):
        # # if trace: g.trace('skipping %s' % ctx_kind)
        # return []

    # ### ast.Name nodes for class base names have no 'e' attr.
    # if not hasattr(node,'e'):
        # if trace: g.trace('no e',node)
        # return []

    if name == 'self':
        # reach = getattr(node,'reach',[])
        # if reach: g.trace('**** assignment to self')
        cx = node.stc_context ### should be class context.
        if cx:
            if trace and trace_self: g.trace('found self',cx)
            t = [Class_Type(cx)]
        else:
            g.trace('**** oops: no class context for self',ti.format(node))
            t = [Unknown_Type(node)]
    else:
        reach = getattr(node,'reach',[])
        t = []
        for node2 in reach:
            # The reaching sets are the RHS of assignments.
            t = [Unknown_Type(node)]
            t2 = ti.visit(node2)
            if isinstance(t2,(list,tuple)):
                t.extend(t2)
            else:
                g.trace('**oops:',t2,ti.format(node2))
        if ti.has_failed(t):
            t = ti.merge_failures(t)
        else:
            t = ti.clean(t)

    # NOTE(review): in the name == 'self' branch above, `reach` is never
    # bound, so this trace (if enabled) would raise NameError there.
    if trace and trace_infer and t:
        g.trace('infer',t,u.format(node))
    if trace and trace_fail and not t:
        g.trace('fail ',name,ctx_kind,'reach:',
            ['%s:%s' % (id(z),u.format(z)) for z in reach])
    return t
#@+node:ekr.20150312225028.353: *8* ti.Slice
def do_Slice(self, node):
    '''Visit the slice bounds (upper, lower, step) for side effects.'''
    for bound in (node.upper, node.lower, node.step):
        if bound:
            self.visit(bound)
    return [self.int_type] ### ??? -- a slice is not really an int.
#@+node:ekr.20150312225028.354: *8* ti.Subscript (to do)
def do_Subscript(self, node):
    '''Infer a subscript; currently just the types of the value itself.'''
    trace = False and not g.app.runningAllUnitTests
    t_value = self.visit(node.value)
    t_slice = self.visit(node.slice)
    if t_value and trace:
        g.trace(t_value, t_slice, self.format(node))
    return t_value ### Should really depend on the slice.
#@+node:ekr.20150312225028.355: *8* ti.UnaryOp
def do_UnaryOp(self, node):
    '''Infer a unary operation: Not yields bool; any operator is valid
    on int/float operands; everything else is an inference failure.'''
    trace = True and not g.app.runningAllUnitTests
    t = self.clean(self.visit(node.operand) or [])
    op_kind = self.kind(node.op)
    if op_kind == 'Not':
        return [self.bool_type]
    if t == [self.int_type] or t == [self.float_type]:
        return t  # All unary operators are valid on numbers.
    self.stats.n_unop_fail += 1
    if trace:
        g.trace(' fail:', op_kind, t, self.format(node))
    return [Unknown_Type(node)]
#@+node:ekr.20150312225028.356: *7* ti.primitive Types
#@+node:ekr.20150312225028.357: *8* ti.Builtin
def do_Builtin(self, node):
    '''A builtin name infers to the singleton builtin type.'''
    return [self.builtin_type]
#@+node:ekr.20150312225028.358: *8* ti.Bytes
def do_Bytes(self, node):
    '''A bytes literal infers to the singleton bytes type.'''
    return [self.bytes_type]
#@+node:ekr.20150312225028.359: *8* ti.Dict
# Dict(expr* keys, expr* values)

def do_Dict(self, node):
    '''Visit all keys and values; the result is a generic dict type.'''
    for key in node.keys:
        self.visit(key)
    for value in node.values:
        self.visit(value)
    return [Dict_Type(node)]
        ### More specific type.
#@+node:ekr.20150312225028.360: *8* ti.List
# List(expr* elts, expr_context ctx) 

def do_List(self, node):
    '''Visit all element expressions; the result is a generic list type.'''
    for element in node.elts:
        self.visit(element)
    # node.ctx is deliberately not visited.
    return [List_Type(node)]
#@+node:ekr.20150312225028.361: *8* ti.Num
def do_Num(self, node):
    '''Infer a numeric literal from the class of its value (int/float).'''
    return [Num_Type(node.n.__class__)]
#@+node:ekr.20150312225028.362: *8* ti.Str
def do_Str(self, node):
    '''A string constant infers to the singleton string type.'''
    return [self.string_type]
#@+node:ekr.20150312225028.363: *8* ti.Tuple
# Tuple(expr* elts, expr_context ctx)

def do_Tuple(self, node):
    '''Visit all element expressions; the result is a generic tuple type.'''
    for element in node.elts:
        self.visit(element)
    # node.ctx is deliberately not visited.
    return [Tuple_Type(node)]
#@+node:ekr.20150312225028.364: *7* ti.statements
#@+node:ekr.20150312225028.365: *8* ti.arguments
# arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def do_arguments (self,node):
    
    '''Bind formal arguments to actual arguments.

    This visitor must never be reached: all the work is done in ti.Call
    and its helpers.  Raise explicitly rather than `assert False`, so the
    guard is not stripped when running under python -O.
    '''
    raise AssertionError('ti.do_arguments should never be called')
#@+node:ekr.20150312225028.366: *8* ti.Assign (**rewrite)
def do_Assign(self,node):
    '''Infer an assignment: visit the RHS for side effects, then return
    the cleaned list of the targets' types.

    The commented-out code below the return is the archived circular-
    assignment detection and cache logic; it is unreachable.
    '''
    ti = self
    trace = False and not g.app.runningAllUnitTests
    t_val = ti.visit(node.value)  # Visited for side effects; value unused.
    t = []
    for z in node.targets:
        t.append(ti.visit(z))
    t = ti.clean(t)
    return t

    # if data in ti.assign_stack:
        # t = [Circular_Assignment(node)]
        # ti.stats.n_circular_assignments += 1
    # else:
        # ti.assign_stack.append(data)
        # try:
            # t = ti.visit(node.value)
            # if trace: g.trace(t)
        # finally:
            # data2 = ti.assign_stack.pop()
            # assert data == data2
        
    # for target in node.targets:
        # kind = ti.kind(target)
        # if kind == 'Name':
            # t0 = ti.get_cache(target.e) or []
            # t.extend(t0)
            # ti.set_cache(target.e,t,tag='Name:target.e')
            # if trace: g.trace('infer: %10s -> %s' % (
                # ti.format(target),t),before='\n')
        # else:
            # ### What to do about this?
            # if trace: g.trace('(ti) not a Name: %s' % (
                # ti.format(target)),before='\n')
                
    # # Update the cache immediately.
    # t0 = ti.get_cache(node) or []
    # t.extend(t0)
    # t = ti.clean(t)
    # ti.set_cache(node,t,tag='ti.Assign')
    # return t
#@+node:ekr.20150312225028.367: *8* ti.ClassDef (could be default)
def do_ClassDef(self, node):
    '''Infer every statement in the class body.'''
    for statement in node.body:
        self.visit(statement)
#@+node:ekr.20150312225028.368: *8* ti.For
# For(expr target, expr iter, stmt* body, stmt* orelse)

def do_For(self, tree):
    '''Infer a for statement: target, iterable, body and else clause.'''
    ### what if target conflicts with an assignment??
    self.visit(tree.target)
    self.visit(tree.iter)
    for statement in tree.body:
        self.visit(statement)
    for statement in tree.orelse:
        self.visit(statement)
#@+node:ekr.20150312225028.369: *8* ti.FunctionDef & helpers (**rewrite)
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,node):
    
    '''Infer this function or method with 'unknown' as the value of all args.
    This gets inference going.

    Note: returns None; the archived code below shows the intended
    cache-aware version that returned the inferred types.
    '''
    
    ti = self
    trace = False and not g.app.runningAllUnitTests
    ti.infer_outer_def(node)
    
    # # Set up function call, with 'unknown' for all args.
    # e = node.e
    # specific_args = [Unknown_Arg_Type(node)] * ti.count_full_args(node)
    # hash_ = ti.cache_hash(specific_args,e)
    # t = ti.get_call_cache(e,hash_)
    # if trace:
        # g.trace('%s %12s -> %s' % ('miss' if t is None else 'hit!',
            # node.name,specific_args))
    # if t is None:
        # t = ti.infer_outer_def(specific_args,hash_,node)
    # return t
#@+node:ekr.20150312225028.370: *9* ti.count_full_args
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def count_full_args(self, node):
    '''Return the number of arguments in a call to the function/def
    defined by node, an ast.FunctionDef node: one slot per plain formal,
    plus one each for *args and **kwargs if present.'''
    trace = False and not g.app.runningAllUnitTests
    assert self.kind(node) == 'FunctionDef'
    args = node.args
    if trace:
        g.trace('args: %s vararg: %s kwarg: %s' % (
            [z.id for z in args.args], args.vararg, args.kwarg))
    return len(args.args) + bool(args.vararg) + bool(args.kwarg)
#@+node:ekr.20150312225028.371: *9* ti.infer_outer_def & helper
def infer_outer_def(self,node):
    
    '''Infer everything possible from a def D called with specific args:
    
    1. Bind the args to the formal parameters in D.
    2. Infer all assignments in D.
    3. Infer all outer expression in D.
    4. Infer all return statements in D.

    Currently disabled: always returns []; the archived implementation
    is kept below.
    '''
    
    return []

    # ti = self
    # # trace = True and not g.app.runningAllUnitTests
    # assert ti.kind(node)=='FunctionDef',node
    # e = node.e
    # assert hasattr(e,'call_cache')
    # cx = e.self_context
    # ### data = ti.switch_context(e,hash_,node)
    # ti.bind_outer_args(node)
    # ti.infer_assignments(cx,e)
    # ti.infer_outer_expressions(cx,node)
    # t = ti.infer_return_statements(cx,e)
    # ### ti.set_call_cache(e,hash_,t,tag='infer_def')
    # ### ti.restore_context(data)
    # return t
#@+node:ekr.20150312225028.372: *10* ti_bind_outer_args (ti.infer_outer_def helper)
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)
def bind_outer_args (self,node):
    
    '''Bind all actual arguments except 'self' to "Unknown_Arg_Type".
    
    NOTE(review): the loop below computes t and e2 but has no effect —
    the ti.set_cache calls at the bottom are commented out.'''
    
    ti = self
    trace = False and not g.app.runningAllUnitTests
    assert ti.kind(node)=='FunctionDef'
    e = node.e
    def_cx = e.self_context
    args = node.args or []
    assert ti.kind(args)=='arguments',args
    assert ti.kind(args.args)=='list',args.args
    # Collect the formal parameter names; tuple parameters (Python 2
    # style) get a placeholder name.
    formal_names = [z.id if hasattr(z,'id') else '<tuple arg>' for z in args.args]
    if args.vararg: formal_names.append(args.vararg)
    if args.kwarg:  formal_names.append(args.kwarg)
    # if trace: g.trace(formal_names)
    d = def_cx.st.d
    for name in formal_names:
        if name == 'self':
            # 'self' gets the enclosing class's type when available.
            if def_cx:
                t = [Class_Type(def_cx)]
            else:
                t = [Unknown_Arg_Type(node)]
            e2 = e
        else:
            t = [Unknown_Arg_Type(node)]
            e2 = d.get(name)
        # if e2:
            # ti.set_cache(e2,t,tag='bind_outer_args:%s'%(name))
            # if trace: g.trace(name,t)
        # else:
            # if trace: g.trace('**** oops: no e2',name,d)
#@+node:ekr.20150312225028.373: *8* ti.Import (not used)
# def do_Import(self,node):
    
    # pass
#@+node:ekr.20150312225028.374: *8* ti.ImportFrom (not used)
# def do_ImportFrom(self,node):
    
    # pass
#@+node:ekr.20150312225028.375: *8* ti.Return & ti.Yield & helper
def do_Return(self,node):
    '''Infer a Return statement via the common return/yield helper.'''
    return self.return_helper(node)

def do_Yield(self,node):
    '''Infer a Yield expression via the common return/yield helper.'''
    return self.return_helper(node)
#@+node:ekr.20150312225028.376: *9* ti.return_helper
def return_helper(self,node):

    '''Shared helper for Return and Yield: return the inferred type list
    of the returned/yielded value, or [ti.none_type] for a bare return.'''

    ti = self
    trace = False and not g.app.runningAllUnitTests
    e = ti.call_e
    assert e
    if node.value:
        t = ti.visit(node.value)
        if ti.has_failed(t):
            # Strip unknowns; keep whatever real types remain.
            ti.stats.n_return_fail += 1
            t = ti.ignore_unknowns(t)
        if t:
            ti.stats.n_return_success += 1
        else:
            ti.stats.n_return_fail += 1
            t = [] # Do **not** propagate a failure here!
    else:
        # A bare 'return' (or 'yield') produces None.
        t = [ti.none_type]
    if trace: g.trace(t,ti.format(node))
    return t
#@+node:ekr.20150312225028.377: *8* ti.With
def do_With (self,node):
    '''Infer a With statement: visit each body statement and return the
    cleaned list of their inferred results.'''
    ti = self
    results = [ti.visit(stmt) for stmt in node.body]
    return ti.clean(results)
#@+node:ekr.20150312225028.378: *5* class TypeInferer (AstTraverser)
class TypeInferer (AstTraverser):
    
    '''
    This class infers the types of objects.
    
    See the documentation for complete details.
    '''
    
    # @others is a Leo directive, not Python: the class's methods live in
    # child outline nodes and are spliced in here when Leo writes the file.
    @others
#@+node:ekr.20150312225028.379: *6*  ti.ctor
def __init__ (self,enable_trace=True):
    
    '''Ctor for the TypeInferer class.
    
    NOTE(review): self.u is never assigned here; any method that reads
    self.u relies on a base class setting it — confirm.'''
    
    AstTraverser.__init__(self)
    
    u = Utils()
    self.cache_traverser = CacheTraverser()
    self.dump_ast = u.dump_ast
    self.format = u.format
    self.kind = None
        # NOTE(review): self.kind is called elsewhere as ti.kind(node),
        # so this None must be overwritten (presumably by
        # AstTraverser.__init__) — confirm.
    self.stats = u.stats
    
    # Detecting circular inferences
    self.call_stack = [] # Detects recursive calls
    self.assign_stack = [] # Detects circular assignments.

    # Create singleton instances of simple types.
    self.bool_type = Bool_Type()
    self.builtin_type = Builtin_Type()
    self.bytes_type = Bytes_Type()
    self.float_type = Float_Type()
    self.int_type = Int_Type()
    self.none_type = None_Type()
    self.string_type = String_Type()
    
    # Create the builtin type dict.
    self.builtin_type_dict = {
        'eval': [self.none_type],
        'id':   [self.int_type],
        'len':  [self.int_type],
        'ord':  [self.int_type],
        # list,tuple...
        # close,open,sort,sorted,super...
    }
    
    # Context info.
    self.null_hash = 'hash:none'
    self.call_args = None # The list of argument types for the present call.
    self.call_e = None
    self.call_hash = self.null_hash
        # The hash associated with self.call_args.
        # All hashes must start with 'hash:'

    # Debugging.
    self.align = 15
    self.enable_trace = enable_trace and u.enable_trace
    self.level = 0 # Node nesting level.
    self.n_caches = 0
    self.trace_level = 0 # Trace nesting level.
    self.trace_context = False
    self.trace_context_level = 0
#@+node:ekr.20150312225028.380: *6*  ti.run (entry point)
def run (self,node):
    
    '''Top-level entry point: check visitor names, infer types for the
    entire tree rooted at node, and return the elapsed wall-clock time
    in seconds.'''
    
    # pylint: disable=W0221
        # Arguments number differs from overridden method.
    
    ti = self
    t1 = time.time()
    ti.check_visitor_names()
    ti.visit(node)
    # Bug fix: the ctor defines ti.stats (copied from a local Utils()),
    # but never ti.u, so the old ``ti.u.stats`` raised AttributeError.
    ti.stats.n_caches += ti.n_caches
    t2 = time.time()
    return t2-t1
#@+node:ekr.20150312225028.381: *6* ti.caches
# Note: the hash_ argument to ti.get_call_cache can't easily be removed.
#@+node:ekr.20150312225028.382: *7* ti.cache_hash
def cache_hash(self,args,e):
    '''Return a perfect (collision-free) hash key for a list of argument
    types bound to the symbol table entry e.

    ti.set_cache asserts hash.startswith('hash:'), which guarantees that
    hash keys can never be confused with tracing tags.'''
    arg_reprs = ','.join([repr(arg) for arg in args])
    owner = '%s@%s' % (e.name,id(e))
    return 'hash:%s([%s])' % (owner,arg_reprs)
#@+node:ekr.20150312225028.383: *7* ti.get_cache
def get_cache(self,obj):
    '''Return obj's cached type list for the present call context,
    creating the (empty) cache on first use.

    A return value of None signals a cache miss.'''
    ti = self
    try:
        cache = obj.cache
    except AttributeError:
        cache = obj.cache = {}
        ti.n_caches += 1
    return cache.get(ti.call_hash,None)
        # None is the signal for a cache miss.
#@+node:ekr.20150312225028.384: *7* ti.get_call_cache
def get_call_cache(self,obj,hash_):
    '''Return obj's per-call cache entry for hash_, creating the (empty)
    call cache on first use. None signals a miss.'''
    ti = self
    if hasattr(obj,'call_cache'):
        cache = obj.call_cache
    else:
        cache = obj.call_cache = {}
        ti.n_caches += 1
    return cache.get(hash_)
#@+node:ekr.20150312225028.385: *7* ti.method_hash
def method_hash(self,e,node):
    
    '''If this is a method call, return the hash for the inferred ctor's
    arguments. Otherwise, return the empty string.'''
    
    ti = self
    trace = False and ti.enable_trace
    
    if ti.kind(e.self_context) == 'ClassContext':
        class_cx = e.self_context
        class_name = class_cx.name
        ctor = e.self_context.ctor
        if ctor:
            args = [ti.visit(z) for z in ctor.node.args.args]
            # NOTE(review): only the hash of the *last* specific-args
            # combination survives this loop; earlier combinations are
            # discarded. Confirm this is intended before reviving.
            for specific_args in ti.cross_product(args):
                hash_ = ti.cache_hash(specific_args,e)
            return 'hash:class:%s:%s' % (class_name,hash_)
        else:
            if trace: ti.trace(class_name,'no ctor')
    return ''
#@+node:ekr.20150312225028.386: *7* ti.set_cache
def set_cache(self,obj,t,tag=''):
    '''Bind the type list t to obj's cache under the present call hash,
    creating the cache if necessary.'''
    ti = self
    trace = False and ti.enable_trace
    assert isinstance(t,list)
    hash_ = ti.call_hash
    # Don't confuse the hash_ and tag_ keywords!
    assert hash_.startswith('hash:'),hash_
    if not hasattr(obj,'cache'):
        obj.cache = {}
        ti.n_caches += 1
    obj.cache[hash_] = t
    if trace: ti.trace('%s -> %s' % (hash_,t))
        # ti.show_cache(obj,obj.cache,tag)

    # Old
    # aList = obj.cache.get(hash_,[])
    # aList.extend(t)
    # aList = ti.clean(aList)
    # obj.cache [hash_] = aList
#@+node:ekr.20150312225028.387: *7* ti.set_call_cache
def set_call_cache(self,obj,hash_,t,tag=''):
    '''Bind the type list t to obj's per-call cache under hash_,
    creating the call cache if necessary.'''
    ti = self
    trace = False and ti.enable_trace
    if not hasattr(obj,'call_cache'):
        obj.call_cache = {}
        ti.n_caches += 1
    # Update e.call_cache, not e.cache!
    assert isinstance(t,list)
    obj.call_cache[hash_] = t
    if trace: ti.trace('%s:%s -> %s' % (obj,hash_,t))

    ### Old
    # aList = obj.call_cache.get(hash_,[])
    # aList.extend(t)
    # aList = ti.clean(aList)
    # obj.call_cache [hash_] = aList
#@+node:ekr.20150312225028.388: *7* ti.show_cache
def show_cache(self,obj,cache,tag):
    '''Dump a cache dict: a g.trace header line, then one printed line
    per (key,item) pair, followed by a blank line.'''
    ti = self
    d = cache
    s = ti.format(obj) if isinstance(obj,ast.AST) else repr(obj)
    if len(s) > 40: s = s[:40]+'...'
    g.trace('%2s %s %s' % (ti.level,tag,s))
    for key in sorted(d.keys()):
        for item in d.get(key):
            print('   %s => %s' % (key,item))
    print('')
#@+node:ekr.20150312225028.389: *6* ti.has_children
def has_children(self,node):
    '''Return True if node, an ast.AST node, has at least one child node.'''
    assert isinstance(node,ast.AST),node.__class__.__name__
    for child in self.get_child_nodes(node):
        return True
    return False
#@+node:ekr.20150312225028.390: *6* ti.helpers
#@+node:ekr.20150312225028.391: *7* ti.clean
def clean (self,aList):
    '''Return sorted(aList) with all duplicates removed, preferring real
    results over inference failures when both are present.'''
    ti = self
    # Dedupe while preserving order; good for debugging and traces.
    unique = []
    for item in aList:
        if item not in unique:
            unique.append(item)
    # An excellent check.
    assert len(unique) == len(list(set(aList))),aList
    # Strip out inference errors if there are real results.
    survivors = ti.ignore_failures(unique)
    if survivors:
        ti.stats.n_clean_success += 1
        return sorted(survivors)
    ti.stats.n_clean_fail += 1
    return sorted(unique)
#@+node:ekr.20150312225028.392: *7* ti.cross_product
def cross_product(self,aList):
    '''Return the cross product of aList, a list of lists, as a list of
    tuples. Examples:

    cross_product([['a']])               -> [('a',)]
    cross_product([['a'],['b']])         -> [('a','b')]
    cross_product([['a'],['b','c']])     -> [('a','b'),('a','c')]
    cross_product([['a','b'],['c','d']]) -> [('a','c'),('a','d'),('b','c'),('b','d')]

    Note: each element of the result is a tuple, not a list.
    '''
    ti = self
    trace = False and ti.enable_trace
    # Materialize the product so we can gather stats on it.
    result = list(itertools.product(*aList))
    # Stats and traces.
    ambig = len(result) > 1
    if trace and ambig: g.trace('\n',aList,'->',result)
    ti.stats.n_cross_products += 1
    n = len(result)
    d = ti.stats.cross_product_dict
    d[n] = 1 + d.get(n,0)
    return result
#@+node:ekr.20150312225028.393: *7* ti.switch/restore_context
# These are called *only* from ti.infer_def.
 
def switch_context(self,e,hash_,node):
    '''Make e/hash_ the current call context and return the saved prior
    context for ti.restore_context. Called only from ti.infer_def.'''
    ti = self
    ti.trace_context = False and ti.enable_trace
    saved = ti.call_args,ti.call_e,ti.call_hash
    ti.call_args = e.call_cache.get(hash_)
    ti.call_e = e
    ti.call_hash = hash_
    if ti.trace_context:
        ti.trace(ti.call_hash,before='\n'+' '*2*ti.trace_context_level)
    ti.trace_context_level += 1
    return saved

def restore_context(self,data):
    '''Restore the call context previously saved by ti.switch_context.'''
    ti = self
    ti.call_args,ti.call_e,ti.call_hash = data
    ti.trace_context_level -= 1
    if self.trace_context:
        ti.trace(ti.call_hash,before=' '*2*ti.trace_context_level)
#@+node:ekr.20150312225028.394: *7* ti.type helpers
def has_failed(self,t1,t2=[],t3=[]):
    '''True if any type in t1+t2+t3 is an Inference_Failure.'''
    return any(isinstance(z,Inference_Failure) for z in t1+t2+t3)

def is_circular(self,t1,t2=[],t3=[]):
    '''True if any type in t1+t2+t3 is a Circular_Assignment.'''
    return any(isinstance(z,Circular_Assignment) for z in t1+t2+t3)

def is_recursive(self,t1,t2=[],t3=[]):
    '''True if any type in t1+t2+t3 is a Recursive_Inference.'''
    return any(isinstance(z,Recursive_Inference) for z in t1+t2+t3)

def ignore_failures(self,t1,t2=[],t3=[]):
    '''Return t1+t2+t3 with all Inference_Failure instances removed.'''
    return [z for z in t1+t2+t3 if not isinstance(z,Inference_Failure)]

def ignore_unknowns(self,t1,t2=[],t3=[]):
    '''Return t1+t2+t3 with Unknown_Type/Unknown_Arg_Type removed.'''
    return [z for z in t1+t2+t3 if not isinstance(z,(Unknown_Type,Unknown_Arg_Type))]

def merge_failures(self,t1,t2=[],t3=[]):
    '''Return only the Inference_Failure instances in t1+t2+t3,
    preferring the most specific failures when several are present.'''
    failures = [z for z in t1+t2+t3 if isinstance(z,Inference_Failure)]
    if len(failures) > 1:
        # Prefer the most specific reason for failure.
        failures = [z for z in failures if not isinstance(z,Unknown_Type)]
    return failures
#@+node:ekr.20150312225028.395: *6* ti.trace
def trace(self,*args,**keys):
    
    '''Trace helper: compute an indentation prefix, then delegate to
    g.trace with alignment and caller-level options.'''
    
    ti = self

    # A manual configuration switch: exactly one branch is enabled by
    # editing the 'if 1'/'elif 1' constants below.
    if 1: # No indentation at all.
        level = 0
    elif 1: # Show tree level.
        level = ti.level
    else: # Minimize trace level.
        if ti.trace_level < ti.level:
            ti.trace_level += 1
        elif ti.trace_level > ti.level:
            ti.trace_level -= 1
        level = ti.trace_level
        
    # Callers may pass an explicit 'before' prefix; otherwise dots show depth.
    if keys.get('before') is None:
        before = '.'*level
    else:
        before = keys.get('before')
  
    keys['align'] = ti.align
    keys['before'] = before
    keys['caller_level'] = 2
    g.trace(*args,**keys)
#@+node:ekr.20150312225028.396: *6* ti.traversers
#@+node:ekr.20150312225028.397: *7*  ti.visit
def visit(self,node):
    '''Infer the types of all nodes in a tree of AST nodes.

    When a do_<ClassName> visitor exists it is responsible both for
    traversing its own subtrees and for returning a type list that the
    caller uses. Otherwise all children are traversed automatically and
    None is returned.'''
    ti = self
    assert isinstance(node,ast.AST),node.__class__.__name__
    visitor = getattr(self,'do_' + node.__class__.__name__,None)
    result = None
    try:
        ti.level += 1
        if visitor:
            result = visitor(node)
        else:
            # Traverse subtrees automatically; nobody uses this value.
            for child in ti.get_child_nodes(node):
                ti.visit(child)
    finally:
        ti.level -= 1
    return result
#@+node:ekr.20150312225028.398: *7*  ti.visit_children
def visit_children(self,node):
    '''Visit every direct child of node; always return None.'''
    assert isinstance(node,ast.AST),node.__class__.__name__
    for child in self.get_child_nodes(node):
        self.visit(child)
    # Returning None is a good test.
    return None
#@+node:ekr.20150312225028.399: *7*  ti.visit_list
def visit_list (self,aList):
    '''Visit each node of aList, a list or tuple; always return None.'''
    ti = self
    # Deliberately an exact type check, not isinstance.
    assert type(aList) in (list,tuple),aList
    for item in aList:
        ti.visit(item)
    return None
#@+node:ekr.20150312225028.400: *6* ti.visitors
#@+node:ekr.20150312225028.401: *7* ti.arguments
# arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def do_arguments (self,node):
    
    '''Bind formal arguments to actual arguments.
    
    Never called: ti.Call and its helpers handle argument binding, so
    reaching this visitor indicates a traversal bug.'''
    
    assert False # All the work is done in ti.Call and its helpers.
#@+node:ekr.20150312225028.402: *7* ti.Assign (sets cache)
def do_Assign(self,node):

    '''Infer an Assign statement: infer node.value (guarding against
    circular assignment chains) and bind the resulting type list to the
    symbol-table entry of each Name target.'''

    ti = self
    trace = False and ti.enable_trace
    junk = ti.visit_list(node.targets)
    hash_ = ti.call_hash
    data = hash_,node
    # Circularity guard: if this (hash,node) pair is already being
    # inferred, don't recurse into node.value again.
    if data in ti.assign_stack:
        t = [Circular_Assignment(hash_,node)]
        ti.stats.n_circular_assignments += 1
    else:
        ti.assign_stack.append(data)
        try:
            t = ti.visit(node.value)
            if trace: ti.trace(t)
        finally:
            data2 = ti.assign_stack.pop()
            assert data == data2
        
    for target in node.targets:
        kind = ti.kind(target)
        if kind == 'Name':
            # Merge the entry's previously-cached types into t.
            # NOTE(review): t accumulates across targets, so later
            # targets also receive types merged for earlier ones —
            # confirm that this is intended.
            t0 = ti.get_cache(target.e) or []
            t.extend(t0)
            ti.set_cache(target.e,t,tag='Name:target.e')
            if trace: ti.trace('infer: %10s -> %s' % (
                ti.format(target),t),before='\n')
        else:
            ### What to do about this?
            if trace: ti.trace('(ti) not a Name: %s' % (
                ti.format(target)),before='\n')
                
    # Update the cache immediately.
    t0 = ti.get_cache(node) or []
    t.extend(t0)
    t = ti.clean(t)
    ti.set_cache(node,t,tag='ti.Assign')
    return t
#@+node:ekr.20150312225028.403: *7* ti.Attribute & check_attr (check super classes for attributes)
# Attribute(expr value, identifier attr, expr_context ctx)

def do_Attribute (self,node):

    '''Infer an Attribute access (value.attr).

    Infers node.value; when that yields exactly one Class_Type, looks up
    node.attr in the class's ivars dict. Fuzzy (multi-type) results are
    returned unchanged; failures become Unknown_Type.'''

    ti = self
    trace = False and ti.enable_trace
    trace_errors = True ; trace_found = False ; trace_fuzzy = True
    # print('do_Attribute',ti.format(node),node.value,node.attr)
    
    # Cache hit: return the previously inferred types for this context.
    t = ti.get_cache(node)
    if t is not None:
        # g.trace('hit',t)
        return t

    #### ti.set_cache(node,[Unknown_Type(ti.call_hash,node)],tag='ti.Attribute')
    t = ti.visit(node.value)
    t = ti.clean(t) ###
    t = ti.merge_failures(t)
    tag = '%s.%s' % (t,node.attr) # node.attr is always a string.
    if t:
        if len(t) == 1:
            ti.stats.n_not_fuzzy += 1
            t1 = t[0]
            if ti.kind(t1) == 'Class_Type':
                # Resolve the attribute against the class's ivars.
                aList = t1.cx.ivars_dict.get(node.attr)
                aList = ti.clean(aList) if aList else []
                if aList:
                    t = []
                    for node2 in aList:
                        t.extend(ti.visit(node2))
                    t = ti.clean(t)
                    ti.set_cache(node,t,tag='ti.Attribute')
                    ti.stats.n_attr_success += 1
                    if trace and trace_found:
                        g.trace('ivar found: %s -> %s' % (tag,t))
                        # [ti.format(z) for z in aList],t))
                elif t1.cx.bases:
                    # NOTE(review): this traces whenever trace_errors is
                    # True, independent of 'trace' — confirm intended.
                    if trace_errors: g.trace('bases',
                        ti.format(node),[ti.format(z) for z in t1.cx.bases])
                    pass ### Must check super classes.
                    t = [Unknown_Type(ti.call_hash,node)] ###
                else:
                    ti.error('%20s has no %s member' % (ti.format(node),t1.cx.name))
                    t = [Unknown_Type(ti.call_hash,node)] ###
            else:
                ti.stats.n_attr_fail += 1
                if trace and trace_errors:
                    g.trace('fail',t,ti.format(node))
                t = [Unknown_Type(ti.call_hash,node)] ###
        else:
            # More than one inferred type for node.value: leave t alone.
            ti.stats.n_fuzzy += 1
            if trace and trace_fuzzy: g.trace('fuzzy',t,ti.format(node))
    else:
        if trace and trace_errors: g.trace('fail',t,ti.format(node))
        t = [Unknown_Type(ti.call_hash,node)]

    # ti.check_attr(node) # Does nothing
    return t
#@+node:ekr.20150312225028.404: *8* ti.check_attr
def check_attr(self,node):
    
    '''Former sanity check for Attribute nodes.
    
    Disabled: returns immediately. The checking is now done in
    ti.do_Attribute; the archived implementation follows.'''
    
    ti = self
    trace = False and ti.enable_trace
    
    return ### Now done in ti.Attribute

    # assert ti.kind(node) == 'Attribute'
    # value = node.value
    # # node.attr is always a string.
    
    # if ti.kind(value) == 'Name':
        # # The ssa pass has computed the ivars dict.
        # # There is no need to examine value.ctx.
        # name = value.id
        # name_e = value.e
        # name_cx = name_e.cx
        # name_class_cx = name_cx.class_context
        # if name == 'self':
            # if name_class_cx:
                # if node.attr in name_class_cx.ivars_dict:
                    # if trace: g.trace('OK: %s.%s' % (
                        # name_class_cx.name,node.attr))
                # else:
                    # ti.error('%s has no %s member' % (
                        # name_class_cx.name,node.attr))
            # else:
                # ti.error('%s is not a method of any class' % (
                    # name)) ####
        # else:
            # ### To do: handle any id whose inferred type is a class or instance.
            # if trace:
                # g.trace('** not checked: %s' % (name))
                # g.trace(ti.dump_ast(value))
#@+node:ekr.20150312225028.405: *7* ti.Builtin
def do_Builtin(self,node):
    
    '''Visitor for the synthetic Builtin node; never expected to be
    reached during type inference.'''
    
    assert False,node
#@+node:ekr.20150312225028.406: *7* ti.Call & helpers
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)
#   Note: node.starargs and node.kwargs are given only if assigned explicitly.

def do_Call (self,node):
    '''
    Infer the value of a function called with a particular set of arguments.

    Handles, in order: known builtins, calls whose symbol-table entry
    cannot be found, class instantiations, and ordinary defs — the last
    inferred once per distinct combination of argument types, with
    per-combination caching and recursion detection.
    '''
    ti = self
    trace = False and ti.enable_trace
    trace_builtins = True ; trace_hit = False
    trace_errors = True ; trace_returns = False

    kind = ti.kind(node)
    func_name = ti.find_function_call(node)
    
    if trace and trace_hit: ti.trace('1:entry:',func_name) # ,before='\n',
    
    # Special case builtins.
    t = ti.builtin_type_dict.get(func_name,[])
    if t:
        if trace and trace_builtins: ti.trace(func_name,t)
        return t
        
    # Find the def or ctor to be evaluated.
    e = ti.find_call_e(node.func)
    if not (e and e.node):
        # find_call_e has given the warning.
        t = [Unknown_Type(ti.call_hash,node)]
        s = '%s(**no e**)' % (func_name)
        if trace and trace_errors: ti.trace('%17s -> %s' % (s,t))
        return t

    # Special case classes.  More work is needed.
    if ti.kind(e.node) == 'ClassDef':
        # Return a type representing an instance of the class
        # whose ctor is evaluated in the present context.
        args,t = ti.class_instance(e)
        if trace and trace_returns:
            s = '%s(%s)' % (func_name,args)
            ti.trace('%17s -> %s' % (s,t))
        return t

    # Infer the specific arguments and gather them in args list.
    # Each element of the args list may have multiple types.
    assert ti.kind(e.node) == 'FunctionDef'
    args = ti.infer_actual_args(e,node)
        
    # Infer the function for the cross-product the args list.
    # In the cross product, each argument has exactly one type.
    ti.stats.n_ti_calls += 1
    recursive_args,t = [],[]
    for specific_args in ti.cross_product(args):
        # Add the specific arguments to the cache.
        hash_ = ti.cache_hash(specific_args,e)
        t2 = ti.get_call_cache(e,hash_)
        miss = t2 is None   
        # if trace and trace_hit:
            # ti.trace('%s %12s -> %s' % ('miss' if miss else 'hit!',
                # func_name,specific_args))
        if miss:
            ti.stats.n_call_misses += 1
            if trace and trace_hit: ti.trace('2:miss',hash_)
            t2 = ti.infer_def(specific_args,e,hash_,node,rescan_flag=False)
            if ti.is_recursive(t2):
                # Remember recursive inferences for the rerun below.
                data = hash_,specific_args,t2
                recursive_args.append(data)
            # if trace and trace_returns: ti.trace(hash_,'->',t2)
        else:
            if trace and trace_hit: ti.trace('2:hit!',hash_)
            ti.stats.n_call_hits += 1
        t.extend(t2)

    # Rerun inference for recursive calls, seeding the call cache with
    # the non-failure types gathered so far.
    if True and recursive_args:
        if trace: ti.trace('===== rerunning inference =====',t)
        for data in recursive_args:
            # Merge the types into the cache.
            hash_,specific_args,t2 = data
            t3  = ti.get_call_cache(e,hash_) or []
            t4 = ti.ignore_failures(t,t2,t3)
            # g.trace('t3',t3)
            # g.trace('t4',t4)
            ti.set_call_cache(e,hash_,t4,tag='ti.call:recursive')
            t5 = ti.infer_def(specific_args,e,hash_,node,rescan_flag=True)
            if trace: g.trace('t5',t5)
            t.extend(t5)
        
    if ti.has_failed(t):
        t = ti.merge_failures(t)
        # t = ti.ignore_failures(t)
    else:
        t = ti.clean(t)
    if trace and trace_returns:
        s = '3:return %s(%s)' % (func_name,args)
        ti.trace('%17s -> %s' % (s,t))
    return t
#@+node:ekr.20150312225028.407: *8* ti.class_instance
def class_instance (self,e):
    
    '''
    Return a type representing an instance of the class
    whose ctor is evaluated in the present context.
    
    Returns (args,t) where args is currently always [] and t is
    [Class_Type(cx)] for e's self_context.
    '''
    
    ti = self
    trace = True and ti.enable_trace
        # NOTE(review): trace is computed but never used below.
    cx = e.self_context
    
    # Step 1: find the ctor if it exists.
    d = cx.st.d
    ctor = d.get('__init__')
        # NOTE(review): ctor is looked up but unused — the archived code
        # below suggests ctor evaluation was never finished.

    # node2 = node.value
    # name = node2.id
    # attr = node.attr
    # e = getattr(node2,'e',None)
    # if trace: ti.trace(kind,v_kind,name,attr)
    # # ti.trace('e',e)
    # t = ti.get_cache(e)
    # # ti.trace('cache',t)
    # if len(t) == 1:
        # t = t[0]
        # e_value = t.node.e
        # # ti.trace('* e_value',e_value)
        # # ti.trace('e_value.self_context',e_value.self_context)
        # e = e_value.self_context.st.d.get(node.attr)
        # if trace: ti.trace('** e_value.self_context.st.d.get(%s)' % (attr),e)
        # # ti.trace('e_value.self_context.st.d', e_value.self_context.st.d)
        # # ti.trace('e.node',e.node)
        
    args = [] ### To do
    t = [Class_Type(cx)]
    ti.set_cache(e,t,tag='class name')
    return args,t
#@+node:ekr.20150312225028.408: *8* ti.find_call_e
def find_call_e (self,node):
    
    '''Find the symbol table entry for node, an ast.Call node.
    
    Name nodes use their precomputed .e attribute; other nodes are
    inferred and, when exactly one Class_Type results, resolved through
    that class's symbol table. Returns None on fuzzy or failed lookups.'''
    
    ti = self
    trace = False and ti.enable_trace
    trace_errors = False; trace_fuzzy = True ; trace_return = False
    kind = ti.kind(node)
    e = None # Default.
    if kind == 'Name':
        # if trace: ti.trace(kind,node.id)
        e = getattr(node,'e',None)
    else:
        t = ti.visit(node)
        if len(t) == 1:
            ti.stats.n_not_fuzzy += 1
            t = t[0]
            if ti.kind(t) == 'Class_Type':
                d = t.cx.st.d
                if ti.kind(node) == 'Attribute':
                    name = node.attr
                elif ti.kind(node) == 'Call':
                    name = node.func
                        # NOTE(review): node.func is an AST node, not a
                        # string; using it as a key into d looks wrong —
                        # confirm before reviving this code.
                else:
                    name = None
                if name:
                    e = d.get(name)
                else:
                    e = None
            else:
                if trace and trace_errors:
                    ti.trace('not a class type: %s %s' % (ti.kind(t),ti.format(node)))
        elif len(t) > 1:
            # Fuzzy: more than one inferred type; give up.
            if trace and trace_fuzzy: ti.trace('fuzzy',t,ti.format(node))
            ti.stats.n_fuzzy += 1
            e = None
        
    # elif kind == 'Attribute':
        # v_kind = ti.kind(node.value)
        # if v_kind == 'Name':
            # node2 = node.value
            # name = node2.id
            # attr = node.attr
            # e = getattr(node2,'e',None)
            # # if trace: ti.trace(kind,v_kind,name,attr)
            # t = ti.get_cache(e)
            # if len(t) == 1:
                # t = t[0]
                # if ti.kind(t) == 'Class_Type':
                    # d = t.cx.st.d
                    # e = d.get(node.attr)
                # else:
                    # pass ### To do
            # elif t:
                # pass
            # else:
                # t = [Unknown_Type(ti.call_hash,node)]
        # elif v_kind == 'Attribute':
            # node2 = node.value
            # ti.trace('*****',kind,v_kind,ti.format(node.value))
            # e = ti.find_call_e(node2)
        # else:
            # ti.trace('not ready yet',kind,v_kind)
            # e = None
    # elif kind in ('Call','Subscript'):
        # ti.trace(kind)
        # e = None
    # else:
        # ti.trace('===== oops:',kind)
        # e = None
        
    if e:
        assert isinstance(e,SymbolTableEntry),ti.kind(e)
        ti.stats.n_find_call_e_success += 1
    else:
        # Can happen with methods,Lambda.
        ti.stats.n_find_call_e_fail += 1
        if trace and trace_errors: ti.trace('**** no e!',kind,ti.format(node),
            align=ti.align,before='\n')

    if e and not e.node:
        if trace and trace_errors: ti.trace(
            'undefined e: %s' % (e),before='\n')

    if trace and trace_return: ti.trace(
        kind,'e:',e,ti.format(node))
    return e
#@+node:ekr.20150312225028.409: *8* ti.infer_actual_args
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)
#   keyword = (identifier arg, expr value) # keyword arguments supplied to call

# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def infer_actual_args (self,e,node):
    
    '''Return a list of types for all actual args, in the order defined
    by the entire formal argument list.
    
    Positional actuals are bound first, then keyword actuals in formal
    order, then defaults (with 'self' getting the enclosing class type);
    unbound formals get Unknown_Arg_Type.'''
    
    ti = self
    trace = False and ti.enable_trace
    trace_args = False
    assert ti.kind(node)=='Call'
    cx = e.self_context
    # Formals...
    formals  = cx.node.args or []
    defaults = cx.node.args.defaults or [] # A list of expressions
    vararg   = cx.node.args.vararg
    kwarg    = cx.node.args.kwarg
    # Actuals...
    actuals  = node.args or [] # A list of expressions.
    keywords = node.keywords or [] # A list of (identifier,expression) pairs.
    starargs = node.starargs
    kwargs   = node.kwargs
    assert ti.kind(formals)=='arguments'
    assert ti.kind(formals.args)=='list'
    
    formal_names = [z.id for z in formals.args]
        # The names of *all* the formal arguments, including those with defaults.
        # Does not include vararg and kwarg.
       
    # Append unnamed actual args.
    # These could be either non-keyword arguments or keyword arguments.
    args = [ti.visit(z) for z in actuals]
    bound_names = formal_names[:len(actuals)]
    
    if trace and trace_args:
        ti.trace('formal names',formal_names)
        ti.trace('   arg names',bound_names)
        ti.trace('    starargs',starargs and ti.format(starargs))
        ti.trace('    keywargs',kwargs   and ti.format(kwargs))
        # formal_defaults = [ti.visit(z) for z in defaults]
            # # The types of each default.
        # ti.trace('formal default types',formal_defaults)
        # ti.trace('unnamed actuals',[ti.format(z) for z in actuals])
    
    # Add keyword args in the call, in the order they appear in the formal list.
    # These could be either non-keyword arguments or keyword arguments.
    keywargs_d = {}
        # NOTE(review): keywargs_d is never used.
    keywords_d = {}
    for keyword in keywords:
        name = keyword.arg
        t = ti.visit(keyword.value)
        value = ti.format(keyword.value)
        keywords_d[name] = (value,t)

    for name in formal_names[len(actuals):]:
        data = keywords_d.get(name)
        if data:
            value,t = data
            if trace and trace_args: ti.trace('set keyword',name,value,t)
            args.append(t)
            bound_names.append(name)
        # else: keywargs_d[name] = None ### ???

    # Finally, add any defaults from the formal args.
    # Defaults align with the *last* len(defaults) formal names.
    n_plain = len(formal_names) - len(defaults)
    defaults_dict = {}
    for i,expr in enumerate(defaults):
        name = formal_names[n_plain+i]
        value = ti.format(expr)
        t = ti.visit(expr)
        defaults_dict[name] = (value,t)

    for name in formal_names:
        if name not in bound_names:
            data = defaults_dict.get(name)
            t = None # default
            if data:
                value,t = data
                if trace and trace_args: ti.trace('set default',name,value,t)
            elif name == 'self':
                # 'self' is bound to the enclosing class's type.
                def_cx = e.self_context
                class_cx = def_cx and def_cx.class_context
                if class_cx:
                    t = [Class_Type(class_cx)]
            if t is None:
                t = [Unknown_Arg_Type(ti.call_hash,node)]
                ti.error('Unbound actual argument: %s' % (name))
            args.append(t)
            bound_names.append(name)
            
    ### Why should this be true???
    # assert sorted(formal_names) == sorted(bound_names)

    if None in args:
        ti.trace('***** opps node.args: %s, args: %s' % (node.args,args))
        args = [z for z in args if z is not None]
        
    if trace: ti.trace('result',args)
    return args
#@+node:ekr.20150312225028.410: *8* ti.infer_def & helpers (sets call cache)
def infer_def(self,specific_args,e,hash_,node,rescan_flag):
    
    '''Infer everything possible from a def D called with specific args:
    
    1. Bind the specific args to the formal parameters in D.
    2. Infer all assignments in D.
    3. Infer all outer expression in D.
    4. Infer all return statements in D.
    
    Results are merged with any previously-cached types for (e, hash_)
    and stored back into e's call cache.
    '''

    ti = self
    trace = False and ti.enable_trace
    # Previously-cached types for this call signature, if any.
    t0 = ti.get_call_cache(e,hash_) or []
    if hash_ in ti.call_stack and not rescan_flag:
        # A recursive call: always add an Recursive_Instance marker.
        if trace:ti.trace('A recursive','rescan',rescan_flag,hash_,'->',t0)
        ti.stats.n_recursive_calls += 1
        t = [Recursive_Inference(hash_,node)]
    else:
        if trace: ti.trace('A',hash_,'->',t0)
        # Push hash_ so nested calls can detect recursion.
        ti.call_stack.append(hash_)
        try:
            cx = e.self_context
            data = ti.switch_context(e,hash_,node)
            ti.bind_args(specific_args,cx,e,node)
            ti.infer_assignments(cx,e)
            ti.infer_outer_expressions(cx,node)
            t = ti.infer_return_statements(cx,e)
            ti.restore_context(data)
        finally:
            # Always pop, even on an inference error, to keep the stack balanced.
            hash2 = ti.call_stack.pop()
            assert hash2 == hash_
    # Merge the result and reset the cache.
    t.extend(t0)
    t = ti.clean(t)
    ti.set_call_cache(e,hash_,t,tag='infer_def')
        # Important: does *not* use ti.call_hash.
    if trace: ti.trace('B',hash_,'->',t)
    return t
#@+node:ekr.20150312225028.411: *9* ti.bind_args (ti.infer_def helper) (To do: handle self)
# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)
#   keyword = (identifier arg, expr value) # keyword arguments supplied to call

# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def bind_args (self,types,cx,e,node):
    
    '''Bind inferred argument types to the formal parameters of the called def.
    
    types: one inferred type per formal parameter.
    cx:    the context of the *called* def.
    e:     the symbol-table entry for the called def.
    node:  the ast.Call node being bound.
    '''
    ti = self
    trace = False and ti.enable_trace
    assert ti.kind(node)=='Call'
    assert isinstance(node.args,list),node
    formals = cx.node.args or []
    assert ti.kind(formals)=='arguments'
    assert ti.kind(formals.args)=='list'
    formal_names = [z.id for z in formals.args]
        # The names of *all* the formal arguments, including those with defaults.
        
    if len(formal_names) != len(types):
        # Arity mismatch: bind nothing rather than bind incorrectly.
        # ti.trace('**** oops: formal_names: %s types: %s' % (formal_names,types))
        return

    def_cx = e.self_context
    d = def_cx.st.d
    for i,name in enumerate(formal_names):
        ### Handle self here.
        t = types[i]
        e2 = d.get(name)
        if e2:
            if trace: ti.trace(e2,t) # ti.trace(e2.name,t)
            ti.set_cache(e2,[t],tag='bindargs:%s'%(name))
        else:
            ti.trace('**** oops: no e2',name,d)
#@+node:ekr.20150312225028.412: *9* ti.infer_assignments
def infer_assignments(self,cx,e):
    
    '''Infer all the assignments in the function context.
    
    The per-node cache is set only when inference succeeds, so failed
    assignments will be re-attempted on a later pass.
    '''

    ti = self
    trace = False and ti.enable_trace
    for a in cx.assignments_list:
        if ti.kind(a) == 'Assign': # ignore AugAssign.
            t2 = ti.get_cache(a)
            if t2:
                ti.stats.n_assign_hits += 1
                if trace: ti.trace('hit!',t2)
            else:
                t2 = ti.visit(a)
                # Cache only if something other than failures was inferred.
                t3 = ti.ignore_failures(t2)
                if t3:
                    ti.stats.n_assign_misses += 1
                    # ti.trace('***** set cache',t2)
                    ti.set_cache(a,t2,tag='infer_assns')
                    if trace: ti.trace('miss',t2)
                else:
                    ti.stats.n_assign_fails += 1
                    if trace: ti.trace('fail',t2)

    return None # This value is never used.
#@+node:ekr.20150312225028.413: *9* ti.infer_outer_expressions
def infer_outer_expressions(self,cx,node):
    
    '''Infer all outer expressions in the function context.
    
    Seeds each expression's cache with Unknown_Type before visiting it,
    so recursive references terminate instead of looping.
    '''

    ti = self
    trace = False and ti.enable_trace
    for exp in cx.expressions_list:
        if trace: ti.trace(ti.call_hash,ti.format(exp))
        t2 = ti.get_cache(exp)
        if t2 is not None:
            ti.stats.n_outer_expr_hits += 1
            if trace: ti.trace('hit!',t2)
        else:
            ti.stats.n_outer_expr_misses += 1
            # ti.trace('miss',ti.call_hash)
            # Set the cache *before* calling ti.visit to terminate the recursion.
            t = [Unknown_Type(ti.call_hash,node)]
            ti.set_cache(exp,t,tag='ti.infer_outer_expressions')
            t = ti.visit(exp)
            ti.set_cache(exp,t,tag='ti.infer_outer_expressions')
            if trace: ti.trace('miss',t)

    return None # This value is never used.
#@+node:ekr.20150312225028.414: *9* ti.infer_return_statements
def infer_return_statements(self,cx,e):
    
    '''Infer all return_statements in the function context.'''
    
    ti = self
    trace = True and ti.enable_trace
    trace_hit = False
    result = []
    for stmt in cx.returns_list:
        assert stmt
        cached = ti.get_cache(stmt)
        if cached:
            # NOTE(review): cached types are *not* added to the result;
            # this preserves the original behavior -- confirm it is intended.
            if trace and trace_hit: ti.trace('hit!',cached)
            continue
        inferred = ti.visit(stmt)
        if trace and trace_hit: ti.trace('miss',inferred)
        result.extend(inferred)
    if ti.has_failed(result):
        return ti.merge_failures(result)
    return ti.clean(result)
#@+node:ekr.20150312225028.415: *7* ti.ClassDef
def do_ClassDef(self,node):
    
    '''
    For lint-like operation: infer all methods with 'unknown' as the value of all args.
    For jit-like operation: do nothing.
    '''
    # In jit-like mode the returned (empty) list should not be used.
    return self.visit_children(node) if lint_like else []
#@+node:ekr.20150312225028.416: *7* ti.Expr (new)
# Expr(expr value)

# This isn't really needed: the default visitor would work.

def do_Expr(self,node):
    
    '''Infer an expression statement by inferring its value.'''
    return self.visit(node.value)
#@+node:ekr.20150312225028.417: *7* ti.FunctionDef & helpers
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)

def do_FunctionDef (self,node):
    
    '''Infer this function or method with 'unknown' as the value of all args.
    This gets inference going.
    '''
    
    ti = self
    trace = False and not ti.enable_trace
    
    # Set up function call, with 'unknown' for all args.
    e = node.e
    specific_args = [Unknown_Arg_Type(ti.call_hash,node)] * ti.count_full_args(node)
    hash_ = ti.cache_hash(specific_args,e)
    # Reuse a previous all-unknown inference for this def if one exists.
    t = ti.get_call_cache(e,hash_)
    if trace:
        ti.trace('%s %12s -> %s' % ('miss' if t is None else 'hit!',
            node.name,specific_args))
    if t is None:
        t = ti.infer_outer_def(specific_args,hash_,node)
    return t


#@+node:ekr.20150312225028.418: *8* ti.count_full_args
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)

def count_full_args (self,node):
    
    '''Return the number of arguments in a call to the function/def defined
    by node, an ast.FunctionDef node.'''
    
    ti = self
    trace = False and ti.enable_trace
    assert ti.kind(node)=='FunctionDef'
    args = node.args
    if trace: ti.trace('args: %s vararg: %s kwarg: %s' % (
        [z.id for z in args.args],args.vararg,args.kwarg))
    # Each of *args and **kwargs counts as one extra argument slot.
    return len(args.args) + bool(args.vararg) + bool(args.kwarg)
#@+node:ekr.20150312225028.419: *8* ti.infer_outer_def & helper
def infer_outer_def(self,specific_args,hash_,node):
    
    '''Infer everything possible from a def D called with specific args:
    
    1. Bind the args to the formal parameters in D.
    2. Infer all assignments in D.
    3. Infer all outer expression in D.
    4. Infer all return statements in D.
    
    Unlike infer_def, this does no recursion bookkeeping: it is the
    top-level entry used by do_FunctionDef.
    '''

    ti = self
    # trace = True and ti.enable_trace
    assert ti.kind(node)=='FunctionDef',node
    e = node.e
    assert hasattr(e,'call_cache')
    cx = e.self_context
    # Run the whole inference inside the def's own context.
    data = ti.switch_context(e,hash_,node)
    ti.bind_outer_args(hash_,node)
    ti.infer_assignments(cx,e)
    ti.infer_outer_expressions(cx,node)
    t = ti.infer_return_statements(cx,e)
    ti.set_call_cache(e,hash_,t,tag='infer_def')
    ti.restore_context(data)
    return t
#@+node:ekr.20150312225028.420: *9* ti_bind_outer_args (ti.infer_outer_def helper)
# FunctionDef(identifier name, arguments args, stmt* body, expr* decorator_list)
#   arguments = (expr* args, identifier? vararg, identifier? kwarg, expr* defaults)
def bind_outer_args (self,hash_,node):
    
    '''Bind all actual arguments except 'self' to "Unknown_Arg_Type".
    
    'self' is bound to the enclosing class's Class_Type when available.
    '''
    
    ti = self
    trace = False and ti.enable_trace
    assert ti.kind(node)=='FunctionDef'
    e = node.e
    def_cx = e.self_context
    args = node.args or []
    assert ti.kind(args)=='arguments',args
    assert ti.kind(args.args)=='list',args.args
    # Tuple arguments (Python 2) have no .id; use a placeholder name.
    formal_names = [z.id if hasattr(z,'id') else '<tuple arg>' for z in args.args]
    if args.vararg: formal_names.append(args.vararg)
    if args.kwarg:  formal_names.append(args.kwarg)
    # if trace: ti.trace(formal_names)
    d = def_cx.st.d
    for name in formal_names:
        if name == 'self':
            if def_cx:
                t = [Class_Type(def_cx)]
            else:
                t = [Unknown_Arg_Type(ti.call_hash,node)]
            e2 = e
        else:
            t = [Unknown_Arg_Type(ti.call_hash,node)]
            e2 = d.get(name)
        if e2:
            ti.set_cache(e2,t,tag='bind_outer_args:%s'%(name))
            if trace: ti.trace(name,t)
        else:
            if trace: ti.trace('**** oops: no e2',name,d)
#@+node:ekr.20150312225028.421: *7* ti.Lambda
def do_Lambda (self,node):
    
    '''Infer a lambda by inferring its body expression.'''
    return self.visit(node.body)
#@+node:ekr.20150312225028.422: *7* ti.operators
#@+node:ekr.20150312225028.423: *8* ti.BinOp & helper
def do_BinOp (self,node):

    '''Infer a binary operator expression from the cleaned types of its
    operands, using a small table of known numeric/list/string rules.
    Anything not matching the table is an inference failure.
    '''
    ti = self
    trace = True and ti.enable_trace
    trace_infer = False ; trace_fail = True
    lt = ti.visit(node.left)
    rt = ti.visit(node.right)
    lt = ti.clean(lt)
    rt = ti.clean(rt)
    op_kind = ti.kind(node.op)
    num_types = ([ti.float_type],[ti.int_type])
    list_type = [List_Type(None)]
    if rt in num_types and lt in num_types:
        # Numeric op: float wins if either operand is a float.
        if rt == [ti.float_type] or lt == [ti.float_type]:
            t = [ti.float_type]
        else:
            t = [ti.int_type]
    elif rt == list_type and lt == list_type and op_kind == 'Add':
        t = list_type
    elif op_kind == 'Add' and rt == [ti.string_type] and lt == [ti.string_type]:
        t = [ti.string_type]
    elif op_kind == 'Mult' and rt == [ti.string_type] and lt == [ti.string_type]:
        # str * str is a TypeError at runtime; report it.
        ti.trace('*** User error: string mult')
        t = [Unknown_Type(ti.call_hash,node)]
    elif op_kind == 'Mult' and (
        (lt==[ti.string_type] and rt==[ti.int_type]) or
        (lt==[ti.int_type] and rt==[ti.string_type])
    ):
        t = [ti.string_type]
    elif op_kind == 'Mod' and lt == [ti.string_type]:
        t = [ti.string_type] # (string % anything) is a string.
    else:
        ti.stats.n_binop_fail += 1
        if trace and trace_fail:
            if 1:
                s = '%r %s %r' % (lt,op_kind,rt)
                g.trace('  fail: %30s %s' % (s,ti.format(node)))
            else:
                g.trace('  fail:',lt,op_kind,rt,ti.format(node))
        t = [Inference_Error(ti.call_hash,node)] ### Should merge types!
    if trace and trace_infer: ti.trace(ti.format(node),'->',t)
    return t
#@+node:ekr.20150312225028.424: *8* ti.BoolOp
def do_BoolOp(self,node):

    '''An and/or expression: children are visited for their side effects;
    the inferred type is always bool (as this inferencer models it).'''
    self.visit_children(node)
    return [self.bool_type]
#@+node:ekr.20150312225028.425: *8* ti.Compare
def do_Compare(self,node):

    '''A comparison: children are visited for their side effects;
    the inferred type is always bool.'''
    self.visit_children(node)
    return [self.bool_type]
#@+node:ekr.20150312225028.426: *8* ti.comprehension
def do_comprehension(self,node):

    '''A comprehension clause: visit all children, yield a list type.'''
    self.visit_children(node)
    return [List_Type(node)]
#@+node:ekr.20150312225028.427: *8* ti.Expr (not used)
# def do_Expr (self,node):

    # ti = self    
    # return ti.visit(node.value)
#@+node:ekr.20150312225028.428: *8* ti.GeneratorExp
def do_GeneratorExp (self,node):

    '''Infer a generator expression from its generator clauses.'''
    ti = self
    trace = False and ti.enable_trace
    ti.visit(node.elt) # Visited for side effects only.
    result = []
    for gen in node.generators:
        result.extend(ti.visit(gen))
    if ti.has_failed(result):
        result = ti.merge_failures(result)
        if trace: ti.trace('failed inference',ti.format(node),result)
        return result
    return ti.clean(result)
#@+node:ekr.20150312225028.429: *8* ti.IfExp (Ternary operator)
# IfExp(expr test, expr body, expr orelse)

def do_IfExp(self,node):
    
    '''Infer a ternary expression as the union of both branches.'''
    ti = self
    ti.visit(node.test) # The test's type is ignored.
    result = ti.visit(node.body)
    result.extend(ti.visit(node.orelse))
    if ti.has_failed(result):
        return ti.merge_failures(result)
    return ti.clean(result)
#@+node:ekr.20150312225028.430: *8* ti.Index (default, at present)
def do_Index(self,node):

    '''Infer an Index node as the type of its wrapped value.'''
    return self.visit(node.value)
#@+node:ekr.20150312225028.431: *8* ti.ListComp
def do_ListComp(self,node):
    
    '''Infer a list comprehension from its generator clauses.'''
    ti = self
    ti.visit(node.elt) # Visited for side effects only.
    result = []
    for gen in node.generators:
        result.extend(ti.visit(gen))
    if ti.has_failed(result):
        return ti.merge_failures(result)
    return ti.clean(result)
#@+node:ekr.20150312225028.432: *8* ti.Slice
def do_Slice(self,node):
    
    '''Visit the slice's parts (for side effects); assume an int result.'''
    ti = self
    # Original visiting order preserved: upper, lower, step.
    for part in (node.upper, node.lower, node.step):
        if part: ti.visit(part)
    return [ti.int_type] ### ???
#@+node:ekr.20150312225028.433: *8* ti.Subscript (*** to do)
def do_Subscript(self,node):

    '''Infer a subscript as the type of its container (### imprecise).'''
    ti = self
    trace = False and not ti.enable_trace
    container_t = ti.visit(node.value)
    slice_t = ti.visit(node.slice)
    if container_t and trace: g.trace(container_t,slice_t,ti.format(node))
    return container_t ### ?
#@+node:ekr.20150312225028.434: *8* ti.UnaryOp
def do_UnaryOp(self,node):
    
    '''Infer a unary operator expression.
    
    'not' always yields a bool; the other unary ops are accepted only
    for int and float operands. Anything else is an inference failure.
    '''
    ti = self
    trace = True and ti.enable_trace
    t = ti.visit(node.operand)
    t = ti.clean(t)
    op_kind = ti.kind(node.op)
    if op_kind == 'Not':
        # Bug fix: this was 't == [self.bool_type]', a no-op *comparison*,
        # so 'not x' wrongly kept the operand's type instead of bool.
        t = [ti.bool_type]
    elif t == [ti.int_type] or t == [ti.float_type]:
        pass # All operators are valid.
    else:
        ti.stats.n_unop_fail += 1
        if trace: g.trace(' fail:',op_kind,t,ti.format(node))
        t = [Unknown_Type(ti.call_hash,node)]
    return t
#@+node:ekr.20150312225028.435: *7* ti.primitive Types
#@+node:ekr.20150312225028.436: *8* ti.Builtin
def do_Builtin(self,node):

    '''Infer a Builtin node (a leaf) as the builtin type.'''
    ti = self
    assert not ti.has_children(node)
    return [ti.builtin_type]

#@+node:ekr.20150312225028.437: *8* ti.Bytes
def do_Bytes(self,node):

    '''Infer a bytes constant (a leaf) as the bytes type.'''
    ti = self
    assert not ti.has_children(node)    
    return [ti.bytes_type]
#@+node:ekr.20150312225028.438: *8* ti.Dict
def do_Dict(self,node):

    '''Infer a dict literal: visit children, yield a generic dict type.'''
    self.visit_children(node)
    return [Dict_Type(node)]
        ### More specific type.
#@+node:ekr.20150312225028.439: *8* ti.List
def do_List(self,node):

    '''Infer a list literal: visit children, yield a generic list type.'''
    self.visit_children(node)
    return [List_Type(node)]
#@+node:ekr.20150312225028.440: *8* ti.Num
def do_Num(self,node):
    
    '''Infer a numeric literal from the class of its value (int or float).'''
    ti = self
    assert not ti.has_children(node)
    return [Num_Type(node.n.__class__)]
#@+node:ekr.20150312225028.441: *8* ti.Str
def do_Str(self,node):
    
    '''Infer a string constant (a leaf) as the string type.'''

    ti = self
    assert not ti.has_children(node)
    return [ti.string_type]
#@+node:ekr.20150312225028.442: *8* ti.Tuple
def do_Tuple (self,node):

    '''Infer a tuple literal: visit children, yield a generic tuple type.'''
    self.visit_children(node)
    return [Tuple_Type(node)]
#@+node:ekr.20150312225028.443: *7* ti.statements
#@+node:ekr.20150312225028.444: *8* ti.For
def do_For(self,node):
    
    '''Infer a For statement by visiting all of its children.'''
    ### what if target conflicts with an assignment??
    return self.visit_children(node)
#@+node:ekr.20150312225028.445: *8* ti.Import (not used)
# def do_Import(self,node):
    
    # pass
#@+node:ekr.20150312225028.446: *8* ti.ImportFrom (not used)
# def do_ImportFrom(self,node):
    
    # pass
#@+node:ekr.20150312225028.447: *8* ti.Return & ti.Yield & helper
def do_Return(self,node):
    '''Infer a return statement via return_helper.'''
    return self.return_helper(node)
    
def do_Yield(self,node):
    '''Infer a yield like a return: both go through return_helper.'''
    return self.return_helper(node)
    
#@+node:ekr.20150312225028.448: *9* ti.return_helper (sets cache)
def return_helper(self,node):

    '''Infer a Return or Yield node and merge the result into the
    call cache for the current (call_e, call_hash) pair.
    
    A bare return yields [none_type]. A failed inference contributes
    nothing rather than propagating the failure.
    '''
    ti = self
    trace = False and ti.enable_trace
    trace_hash = False
    assert node
    e,hash_ = ti.call_e,ti.call_hash
    assert e
    assert hash_
    if node.value:
        t = ti.visit(node.value)
        if ti.has_failed(t):
            ti.stats.n_return_fail += 1
            t = ti.ignore_unknowns(t)
        if t:
            # Don't set the cache unless we succeed!
            ti.set_cache(node,t,tag=ti.format(node))
            ti.stats.n_return_success += 1
        else:
            ti.stats.n_return_fail += 1
            t = [] # Do **not** propagate a failure here!
    else:
        t = [ti.none_type]
    # Set the cache.
    t0 = ti.get_call_cache(e,hash_) or []
    t.extend(t0)
    ti.set_call_cache(e,hash_,t,tag='ti.return')
    if trace:
        if trace_hash: ti.trace(t,hash_,ti.format(node))
        else:          ti.trace(t,ti.format(node))
    return t
#@+node:ekr.20150312225028.449: *8* ti.With
def do_With (self,node):

    '''Infer a with statement from the types of its body.'''
    return self.visit_list(node.body)
#@+node:ekr.20150312225028.450: *7* ti.Name (***ivars don't work)
def do_Name(self,node):
    
    '''Infer a Name node in Load or Param context.
    
    Tries, in order: the cached types for node.e; the enclosing class
    (for 'self'); otherwise the union of the types of the node's
    reaching definitions (the RHS of assignments).
    NOTE(review): the headline says ivars don't work here -- confirm.
    '''
    ti = self
    trace = True and ti.enable_trace
    trace_hit = False ; trace_infer = False
    trace_fail = True ; trace_self = False
    ctx_kind = ti.kind(node.ctx)
    name = node.id
    trace = trace and name == 'i'
    hash_ = ti.call_hash
    
    # Reaching sets are useful only for Load attributes.
    if ctx_kind not in ('Load','Param'):
        # if trace: ti.trace('skipping %s' % ctx_kind)
        return []

    ### ast.Name nodes for class base names have no 'e' attr.
    if not hasattr(node,'e'):
        if trace: ti.trace('no e',node)
        return []

    t = ti.get_cache(node.e) or []
    t = ti.clean(t)
    t = ti.ignore_failures(t)
    if t:
        if trace and trace_hit: ti.trace('**hit!',t,name)
    elif name == 'self':
        e = node.e
        reach = getattr(e,'reach',[])
        if reach: ti.trace('**** assignment to self')
        cx = e.cx.class_context
        if cx:
            d = cx.ivars_dict
            if trace and trace_self: ti.trace('found self',e.name)
                # ti.u.dump_ivars_dict(d)) # Very expensive
            t = [Class_Type(cx)]
        else:
            ti.trace('**** oops: no class context for self',ti.format(node))
            t = [Unknown_Type(ti.call_hash,node)]
    else:
        reach = getattr(node.e,'reach',[])
        t = []
        for node2 in reach:
            # The reaching sets are the RHS of assignments.
            t2 = ti.get_cache(node2)
            if t2 is None:
                # Set the cache *before* calling ti.visit to terminate the recursion.
                t = [Unknown_Type(ti.call_hash,node)]
                ti.set_cache(node2,t,tag='ti.Name')
                t2 = ti.visit(node2)
                ti.set_cache(node2,t2,tag='ti.Name')
            if isinstance(t2,(list,tuple)):
                t.extend(t2)
            else:
                ti.trace('**oops:',t2,ti.format(node2))
        if ti.has_failed(t):
            t = ti.merge_failures(t)
        else:
            t = ti.clean(t)

    if trace and trace_infer and t:
        ti.trace('infer',t,ti.format(node))
    if trace and trace_fail and not t:
        ti.trace('fail ',name,ctx_kind,'reach:',
            ['%s:%s' % (id(z),ti.format(z)) for z in reach])
    return t
#@+node:ekr.20160603042613.1: *4* @@clean resolve.py
'''Resolve Python symbols.'''

@language python
@tabwidth -4

<< resolver imports>>
# Module-level counters and globals used by the Resolve passes below.
stats = {}
    # Timing stats.
n_pass_nodes = [None, 0, 0]
    # Only Passes 1 & 2 traverse nodes.
    # The sum is the number of calls to handleNodes
n_ignore = n_handleChildren = n_FunctionDef = n_null_nodes = 0
n_load = n_store = n_scopes = 0
test_scope = None

# Globally defined names which are not attributes of the builtins module, or
# are only present on some platforms.
_MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError']
@others
#@+node:ekr.20160603042613.2: *5* << resolver imports >>
import leo.core.leoGlobals as g # EKR
assert g

# import doctest
import os
import sys
import time # ekr

PY2 = sys.version_info < (3, 0)
PY32 = sys.version_info < (3, 3)    # Python 2.5 to 3.2
PY33 = sys.version_info < (3, 4)    # Python 2.5 to 3.3
builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins'))

try:
    import ast
except ImportError:     # Python 2.5
    import _ast as ast

    if 'decorator_list' not in ast.ClassDef._fields:
        # Patch the missing attribute 'decorator_list'
        ast.ClassDef.decorator_list = ()
        ast.FunctionDef.decorator_list = property(lambda s: s.decorators)

import messages # EKR
# from pyflakes import messages
#@+node:ekr.20160603042613.3: *5* top-level
#@+node:ekr.20160603042613.4: *6* getAlternatives

# Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally)
# EKR: used only by differentForks
if PY32:
    # Python <= 3.2: a try statement is split into TryExcept + TryFinally.
    def getAlternatives(n):
        '''Return the lists of alternative statement bodies of an If/Try* node.'''
        if isinstance(n, (ast.If, ast.TryFinally)):
            return [n.body]
        if isinstance(n, ast.TryExcept):
            return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
else:
    # Python >= 3.3: a single ast.Try node.
    def getAlternatives(n):
        '''Return the lists of alternative statement bodies of an If/Try node.'''
        if isinstance(n, ast.If):
            return [n.body]
        if isinstance(n, ast.Try):
            return [n.body + n.orelse] + [[hdl] for hdl in n.handlers]
#@+node:ekr.20160603042613.5: *6* getNodeName

def getNodeName(node):
    '''Return node.id if present (Name-like nodes), else node.name
    (e.g. an ExceptHandler node), else None.'''
    for attr in ('id', 'name'):
        if hasattr(node, attr):
            return getattr(node, attr)
    return None
#@+node:ekr.20160603042613.6: *6* getNodeType (returns uppercase class name)

if PY2:
    def getNodeType(node_class):
        # Return the uppercased class name, e.g. 'FUNCTIONDEF'.
        # workaround str.upper() which is locale-dependent
        ### return str(unicode(node_class.__name__).upper())
        return node_class.__name__.upper()
            # EKR: hehe: pyflakes complains about unicode.
else:
    def getNodeType(node_class):
        # Return the uppercased class name, e.g. 'FUNCTIONDEF'.
        return node_class.__name__.upper()
#@+node:ekr.20160603042613.7: *6* iter_child_nodes

# def iter_child_nodes(node, omit=None, _fields_order=_FieldsOrder()):
    # """
    # Yield all direct child nodes of *node*, that is, all fields that
    # are nodes and all items of fields that are lists of nodes.
    # """
    # for name in _fields_order[node.__class__]:
        # if name == omit:
            # continue
        # field = getattr(node, name, None)
        # if isinstance(field, ast.AST):
            # yield field
        # elif isinstance(field, list):
            # for item in field:
                # yield item
#@+node:ekr.20160603042613.8: *6* unit_test

def unit_test(raise_on_fail=True):
    '''Run basic unit tests for this file.
    
    Checks that Resolve has a visitor for every concrete _ast node class
    that is neither an operator class nor in the explicit remove list.
    '''
    import _ast
    # import leo.core.leoAst as leoAst
    # Compute all fields to test.
    aList = sorted(dir(_ast))
    remove = [
        'Interactive', 'Suite', # Not necessary.
        'PyCF_ONLY_AST', # A constant,
        'AST', # The base class,
        # Grammar symbols...
        'expr', 'mod', 'stmt',
        'boolop', 'cmpop', 'unaryop', 'operator',
        # Field names...
        'expr_context', 'excepthandler', 'slice', 'withitem',
    ]
    aList = [z for z in aList if not z.startswith('_') and not z in remove]
    # Now test them.
    # Create a real tree so handleNode doesn't have to test for an empty tree.
    fn, s = '<test>', 'pass'
    tree = compile(s, fn, "exec", ast.PyCF_ONLY_AST)
    ft = Resolve(tree)
    # Operator classes need no visitor of their own.
    operator_classes = (
        ast.cmpop, ast.boolop,
        ast.expr_context,
        ast.operator, ast.unaryop,
    )
    errors, nodes, operators = 0, 0, 0
    for z in aList:
        class_ = getattr(ast, z, None)
        # class_name = class_.__name__
        if hasattr(ft, z):
            nodes += 1
        elif issubclass(class_, operator_classes):
            operators += 1
        else:
            errors += 1
            print('Missing pyflakes visitor for: %s' % z)
    s = '%s node types, %s errors' % (nodes, errors)
    if raise_on_fail:
        assert not errors, s
    else:
        print(s)
#@+node:ekr.20160603042613.9: *5* resolve.Binding & Definition (not used)

# Binding and Definitions classes...
#@+node:ekr.20160603042613.10: *6* class Binding


class Binding(object):
    """
    Represents the binding of a value to a name.

    The Resolver uses this to keep track of which names have been bound and
    which names have not. See L{Assignment} for a special type of binding that
    is checked with stricter rules.

    @ivar used: pair of (L{Scope}, line-number) indicating the scope and
                line number that this binding was last used
    """
    kind = 'binding'
    def __init__(self, name, source):
        '''Bind *name* to *source*, the ast node that created the binding.'''
        self.name = name
        self.source = source # EKR: a node.
        self.used = False # EKR: Set in helpers of Name.

    @others
#@+node:ekr.20160603042613.11: *7* Binding.__str__

def __str__(self):
    '''Return the bound name.'''
    return self.name
#@+node:ekr.20160603042613.12: *7* Binding.__repr__

def __repr__(self):
    '''Return an aligned one-line description: line number, kind, name.'''
    lineno = self.source.lineno
    return '<Binding line %-2s %-6s %15s>' % (lineno, self.kind, self.name)
#@+node:ekr.20160603042613.13: *7* Binding.redefines

def redefines(self, other):
    '''True if *other* is a Definition with the same name as this binding.'''
    return isinstance(other, Definition) and self.name == other.name
#@+node:ekr.20160603042613.14: *6* class Definition (Binding)


class Definition(Binding):
    """
    A binding that defines a function or a class.
    """
    # Adds no behavior; subclasses override the class-level 'kind'.
#@+node:ekr.20160603042613.15: *6* class Importation (Definition)


class Importation(Definition):
    """
    A binding created by an import statement.

    @ivar fullName: The complete name given to the import statement,
        possibly including multiple dotted components.
    @type fullName: C{str}
    """
    # The bound name itself is only the top-level component (see __init__).
    kind = 'import'
    @others
#@+node:ekr.20160603042613.16: *7* __init__

def __init__(self, name, source):
    '''Record the full dotted name; bind only its top-level component.'''
    self.fullName = name
    self.redefined = []
    # 'a.b.c' binds the name 'a' in the scope.
    name = name.split('.')[0]
    super(Importation, self).__init__(name, source)
#@+node:ekr.20160603042613.17: *7* redefines

def redefines(self, other):
    '''True if this import redefines *other*: imports compare by full
    dotted name; any other Definition compares by bare name.'''
    if isinstance(other, Importation):
        return self.fullName == other.fullName
    # EKR: same as Binding.redefines.
    return isinstance(other, Definition) and self.name == other.name
#@+node:ekr.20160603042613.18: *6* class Argument (Binding)


class Argument(Binding):
    """
    Represents binding a name as an argument.
    """
    # Adds no behavior to Binding; only the kind differs.
    kind = 'arg' # EKR
#@+node:ekr.20160603042613.19: *6* class Assignment (Binding)


class Assignment(Binding):
    """
    Represents binding a name with an explicit assignment.

    The Resolver will raise warnings for any Assignment that isn't used. Also,
    the Resolver does not consider assignments in tuple/list unpacking to be
    Assignments, rather it treats them as simple Bindings.
    """
    # Adds no behavior to Binding; only the kind differs.
    kind = 'assign' # EKR
#@+node:ekr.20160603042613.20: *6* class FunctionDefinition (Definition)


class FunctionDefinition(Definition):
    
    # A Definition created for a def statement; adds no behavior.
    kind = 'def' # EKR
#@+node:ekr.20160603042613.21: *6* class ClassDefinition (Definition)


class ClassDefinition(Definition):
    
    # A Definition created for a class statement; adds no behavior.
    kind = 'class' # EKR
#@+node:ekr.20160603042613.22: *6* class ExportBinding (Binding)


class ExportBinding(Binding):
    """
    A binding created by an C{__all__} assignment.  If the names in the list
    can be determined statically, they will be treated as names for export and
    additional checking applied to them.

    The only C{__all__} assignment that can be recognized is one which takes
    the value of a literal list containing literal strings.  For example::

        __all__ = ["foo", "bar"]

    Names which are imported and not otherwise used but appear in the value of
    C{__all__} will not have an unused import warning reported for them.
    """
    kind = 'export' # EKR
        # NOTE(review): __init__ below overrides this to 'import' -- confirm.
    
    @others
#@+node:ekr.20160603042613.23: *7* __init__

def __init__(self, name, source, scope):
    '''Collect the literal string names assigned (or augmented) to __all__.'''
    if '__all__' in scope and isinstance(source, ast.AugAssign):
        # __all__ += [...]: start from the names already exported.
        self.names = list(scope['__all__'].names)
    else:
        self.names = []
    self.kind = 'import'
        # NOTE(review): overrides the class-level kind = 'export' -- confirm intended.
    if isinstance(source.value, (ast.List, ast.Tuple)):
        for node in source.value.elts:
            if isinstance(node, ast.Str):
                self.names.append(node.s)
    super(ExportBinding, self).__init__(name, source)
#@+node:ekr.20160603042613.24: *5* resolve.Scope classes

# Scope classes...
#@+node:ekr.20160603042613.25: *6* class Scope(dict)


class Scope(dict):
    # A Scope is a dict mapping names to Bindings, plus tree links
    # (parent/children) added by EKR.

    importStarred = False
        # set to True when import * is found

    # EKR: Adding more data to scopes takes negligible time.
    def __init__(self, node, name, parent):
        '''Create a scope for *node* and link it into the parent's children.'''
        self.name = name
        self.node = node
        self.parent = parent
        self.children = []
        if self.parent:
            self.parent.children.append(self)
        
    @others

    
#@+node:ekr.20160603042613.26: *7* Scope.__repr__

def __repr__(self):
    '''Return just the scope's name (verbose alternatives left commented out).'''
    # scope_class = self.__class__.__name__
    # return '<%s at 0x%x %s>' % (scope_class, id(self), dict.__repr__(self))
    # return '%s %s' % (scope_class, self.name)
    return self.name
#@+node:ekr.20160603042613.27: *6* class ClassScope (Scope)


class ClassScope(Scope):

    # The scope created for a class body; adds no behavior to Scope.
    def __init__(self, node, name, parent):
        Scope.__init__(self, node, name, parent)

#@+node:ekr.20160603042613.28: *6* class FunctionScope (Scope)


class FunctionScope(Scope):
    """
    I represent a name scope for a function.

    @ivar globals: Names declared 'global' in this function.
    """
    # EKR: only FunctionScope defines .globals ivar.
    usesLocals = False
    # Names that count as used even when never explicitly referenced.
    alwaysUsed = set([
        '__tracebackhide__',
        '__traceback_info__',
        '__traceback_supplement__'])

    @others
#@+node:ekr.20160603042613.29: *7* FunctionScope.__init__

def __init__(self, node, name, parent):
    '''Create a function scope; pre-seed globals with the always-used names.'''
    Scope.__init__(self, node, name, parent)
    # Simplify: manage the special locals as globals
    self.globals = self.alwaysUsed.copy()
    self.returnValue = None     # First non-empty return
    self.isGenerator = False    # Detect a generator
#@+node:ekr.20160603042613.30: *7* FunctionScope.unusedAssignments

def unusedAssignments(self):
    """
    Return a generator for the assignments which have not been used.
    """
    # EKR: only called in FunctionScope.
    # All four tests are pure reads, so reordering them is safe.
    for name, binding in self.items():
        if (isinstance(binding, Assignment) and
            not binding.used and
            not self.usesLocals and
            name not in self.globals
        ):
            yield name, binding
#@+node:ekr.20160603042613.31: *6* class GeneratorScope (Scope)


class GeneratorScope(Scope):

    # Adds no behavior to Scope; presumably pushed for generator
    # expressions/comprehensions -- confirm at call sites.
    def __init__(self, node, name, parent):
        Scope.__init__(self, node, name, parent)
#@+node:ekr.20160603042613.32: *6* class ModuleScope (Scope)


class ModuleScope(Scope):

    def __init__(self, node, name, parent):
        '''Create the outermost scope; prefixes the name with "Module:".'''
        assert parent is None, parent
            # Module's are the only scopes without a parent.
        name = 'Module: %s' % name
        Scope.__init__(self, node, name, parent)
#@+node:ekr.20160603042613.33: *5* class Resolve


class Resolve(object):
    """
    I check the cleanliness and sanity of Python code.

    @ivar _deferredFunctions: Tracking list used by L{deferFunction}.  Elements
        of the list are two-tuples.  The first element is the callable passed
        to L{deferFunction}.  The second element is a copy of the scope stack
        at the time L{deferFunction} was called.

    @ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
        callables which are deferred assignment checks.
    """

    nodeDepth = 0 # EKR: also set in ctor.
    offset = None # Optional (line, col) offset applied in handleNode.
    traceTree = False

    # Built-in names, extendable via the PYFLAKES_BUILTINS env var.
    builtIns = set(builtin_vars).union(_MAGIC_GLOBALS)
    _customBuiltIns = os.environ.get('PYFLAKES_BUILTINS')
    if _customBuiltIns:
        builtIns.update(_customBuiltIns.split(','))
    del _customBuiltIns

    @others
#@+node:ekr.20160603042613.34: *6* Resolve.__init__

def __init__(self, tree, filename='(none)', builtins=None):
    """
    Ctor for the Resolve class.

    tree:     the module's ast; filename: used only in messages;
    builtins: optional extra built-in names.

    Immediately starts the traversal via handleNode (pass 1).
    """
    self.defs_list = []         # Bunches of deferred def/lambda bodies (pass 2).
    self.check_assign_list = [] # Scopes whose assignments are checked in pass 3.
    self.deadScopes = []        # Fully-examined scopes, reported in pass 4.
    self.messages = []          # Accumulated message instances.
    self.nodeDepth = 0
    self.filename = filename
    # EKR: self.builtIns defined in class node.
    if builtins:
        self.builtIns = self.builtIns.union(builtins)
    self.exceptHandlers = [()]
    self.futuresAllowed = True  # Cleared by the first non-docstring statement.
    self.root = tree
    self.pass_n = 1
    self.scope = None
    self.handleNode(tree, parent=None)
#@+node:ekr.20160603042613.35: *6* Resolve.popScope

def popScope(self):
    """Retire the current scope onto deadScopes; the parent becomes current."""
    dead = self.scopeStack.pop()
    self.deadScopes.append(dead)
    if self.scopeStack:
        self.scope = self.scopeStack[-1]
    else:
        self.scope = None
#@+node:ekr.20160603042613.36: *6* Resolve.pushScope

def pushScope(self, node, name, scopeClass):
    """Create a scopeClass instance and make it the current scope."""
    global n_scopes
    n_scopes += 1
    if self.scopeStack:
        parent = self.scopeStack[-1] or None
    else:
        parent = None
    self.scope = scopeClass(node, name, parent)
    self.scopeStack.append(self.scope)
#@+node:ekr.20160603042613.37: *6* Resolve.report

def report(self, messageClass, *args, **kwargs):
    """Instantiate messageClass with our filename and queue the message."""
    message = messageClass(self.filename, *args, **kwargs)
    self.messages.append(message)
#@+node:ekr.20160603042613.38: *6* Resolve.tree structure
#@+node:ekr.20160603042613.39: *6* Resolve.differentForks & helpers

def differentForks(self, lnode, rnode):
    """True, if lnode and rnode are located on different forks of IF/TRY."""
    ancestor = self.getCommonAncestor(lnode, rnode, self.root)
    for branch in getAlternatives(ancestor) or []:
        on_left = self.descendantOf(lnode, branch, ancestor)
        on_right = self.descendantOf(rnode, branch, ancestor)
        if on_left != on_right:
            # Exactly one of the nodes lives under this branch.
            return True
    return False
#@+node:ekr.20160603042613.40: *7* Resolve.descendantOf

def descendantOf(self, node, ancestors, stop):
    """True if node shares a common ancestor (below stop) with any of ancestors."""
    return any(self.getCommonAncestor(node, a, stop) for a in ancestors)
#@+node:ekr.20160603042613.41: *7* Resolve.getCommonAncestor

def getCommonAncestor(self, lnode, rnode, stop):
    """
    Return the closest common ancestor of lnode and rnode, or None if the
    search reaches stop (or a node with no parent attribute) first.
    """
    if stop in (lnode, rnode):
        return None
    if not hasattr(lnode, 'parent') or not hasattr(rnode, 'parent'):
        return None
    if lnode is rnode:
        return lnode
    # Walk the deeper side up one level (both when depths are equal).
    if lnode.depth > rnode.depth:
        lnode = lnode.parent
    elif lnode.depth < rnode.depth:
        rnode = rnode.parent
    else:
        lnode, rnode = lnode.parent, rnode.parent
    return self.getCommonAncestor(lnode, rnode, stop)
#@+node:ekr.20160603042613.42: *6* Resolve.getParent

def getParent(self, node):
    """Return the first ancestor that is not a Tuple, List or Starred."""
    # EKR: handleNode sets node.parent.
    ancestor = node.parent
    # Tuple/List carry .elts; Starred (and other expr contexts) carry .ctx.
    while hasattr(ancestor, 'elts') or hasattr(ancestor, 'ctx'):
        ancestor = ancestor.parent
    return ancestor
#@+node:ekr.20160603042613.43: *6* Resolve.addBinding (Remove??)

def addBinding(self, node, value):
    """
    Called when a binding is altered.

    - `node` is the statement responsible for the change
    - `value` is the new value, a Binding instance

    Reports shadowing/redefinition problems, then installs value in the
    current scope.
    """
    trace = False and test_scope == 'test'
    # assert value.source in (node, node.parent):
    # Find the innermost scope already containing the name (or the outermost).
    for scope in self.scopeStack[::-1]:
            # EKR: same as list(reversed(scopeStack))
        if value.name in scope:
            break
    existing = scope.get(value.name)

    if existing and not self.differentForks(node, existing.source):

        parent_stmt = self.getParent(value.source)
        if isinstance(existing, Importation) and isinstance(parent_stmt, ast.For):
            self.report(messages.ImportShadowedByLoopVar,
                        node, value.name, existing.source)

        elif scope is self.scope:
            # g.trace('====', scope, existing, existing.used, value)
            if (isinstance(parent_stmt, ast.comprehension) and
                not isinstance(self.getParent(existing.source),
                    (ast.For, ast.comprehension))
            ):
                self.report(messages.RedefinedInListComp,
                            node, value.name, existing.source)
            elif not existing.used and value.redefines(existing):
                # Redefines Class or Function.
                self.report(messages.RedefinedWhileUnused,
                            node, value.name, existing.source)

        elif isinstance(existing, Importation) and value.redefines(existing):
            existing.redefined.append(node)

    if value.name in self.scope:
        # then assume the rebound name is used as a global or within a loop
        value.used = self.scope[value.name].used

    self.scope[value.name] = value
    if trace: g.trace('    pass: %s %20r in %s' % (
        self.pass_n, value, scope.name))
        # getattr(self.scope, 'name', self.scope.__class__.__name__)))
#@+node:ekr.20160603042613.44: *6* Resolve.docstrings...
#@+node:ekr.20160603042613.45: *7* Resolve.isDocstring

def isDocstring(self, node):
    """
    Determine if the given node is a docstring: either a string constant,
    or an expression statement wrapping one.
    """
    if isinstance(node, ast.Str):
        return True
    return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
#@+node:ekr.20160603042613.46: *7* Resolve.getDocstring

def getDocstring(self, node):
    """
    Return (text, doctest_lineno) for a docstring node, or (None, None)
    when node is not a string constant.
    """
    target = node.value if isinstance(node, ast.Expr) else node
    if not isinstance(target, ast.Str):
        return (None, None)
    # Computed incorrectly if the docstring has backslash continuations.
    doctest_lineno = target.lineno - target.s.count('\n') - 1
    return (target.s, doctest_lineno)
#@+node:ekr.20160603042613.47: *6* Resolve.Node handlers

# EKR: Like visitors.
#@+node:ekr.20160603042613.48: *7* Resolve.handleNode (injects parent/depth fields)

def handleNode(self, node, parent):
    """
    Dispatch to the visitor named after node's class, after injecting
    node.parent and node.depth.  Only passes 1 and 2 traverse nodes.
    """
    # EKR: this is the general node visitor.
    global n_pass_nodes, n_null_nodes
    assert node, g.callers()
    # The following will fail unless 0 < self.pass_n < 3
    n_pass_nodes[self.pass_n] += 1
    if self.offset and getattr(node, 'lineno', None) is not None:
        node.lineno += self.offset[0]
        node.col_offset += self.offset[1]
    # __future__ imports are legal only before the first real statement.
    if (self.futuresAllowed and
        not (isinstance(node, (ast.Module, ast.ImportFrom)) or self.isDocstring(node))
             # EKR: works regardless of new_module.
    ):
        self.futuresAllowed = False
    # EKR: getCommonAncestor uses node.depth.
    self.nodeDepth += 1
    node.depth = self.nodeDepth
    node.parent = parent
    handler = getattr(self, node.__class__.__name__)
    handler(node)
    self.nodeDepth -= 1

# _getDoctestExamples = doctest.DocTestParser().get_examples
#@+node:ekr.20160603042613.49: *7* Resolve: default node handlers

if 1:
    @others
#@+node:ekr.20160603042613.50: *8* ft.operators & operands
#@+node:ekr.20160603042613.51: *9* ft.alias

def alias(self, node):
    """An import alias has no child nodes to visit."""
    return None
#@+node:ekr.20160603042613.52: *9* ft.arguments & arg

# 2: arguments = (expr* args, identifier? vararg,
#                 identifier? kwarg, expr* defaults)
# 3: arguments = (arg*  args, arg? vararg,
#                 arg* kwonlyargs, expr* kw_defaults,
#                 arg? kwarg, expr* defaults)

def arguments(self, node):
    """
    Visit an arguments node, handling both Python 2 and Python 3 layouts.

    Fix: the original referenced an undefined name `aList` in the
    kwonlyargs and kw_defaults branches, raising NameError for any
    Python 3 function with keyword-only arguments.
    """
    for z in node.args:
        self.handleNode(z, node)
    if g.isPython3 and getattr(node, 'vararg', None):
        # An identifier in Python 2, a node in Python 3.
        self.handleNode(node.vararg, node)
    aList = getattr(node, 'kwonlyargs', None) # Python 3.
    if aList:
        assert isinstance(aList, (list, tuple)), repr(aList)
        for z in aList:
            self.handleNode(z, node)
    aList = getattr(node, 'kw_defaults', None) # Python 3.
    if aList:
        assert isinstance(aList, (list, tuple)), repr(aList)
        for z in aList:
            self.handleNode(z, node)
    if g.isPython3 and getattr(node, 'kwarg', None):
        # An identifier in Python 2, a node in Python 3.
        self.handleNode(node.kwarg, node)
    for z in node.defaults:
        self.handleNode(z, node)

# 3: arg = (identifier arg, expr? annotation)

def arg(self, node):
    """Visit a Python 3 arg node: only its optional annotation needs a visit."""
    annotation = getattr(node, 'annotation', None)
    if annotation:
        self.handleNode(annotation, node)
#@+node:ekr.20160603042613.53: *9* ft.Attribute

# Attribute(expr value, identifier attr, expr_context ctx)

def Attribute(self, node):
    """Visit the object part of an attribute access; the ctx is ignored."""
    self.handleNode(node.value, node)
#@+node:ekr.20160603042613.54: *9* ft.BinOp

# BinOp(expr left, operator op, expr right)

def BinOp(self, node):
    """Visit both operands of a binary operator; the op itself has no handler."""
    for operand in (node.left, node.right):
        self.handleNode(operand, node)
#@+node:ekr.20160603042613.55: *9* ft.BoolOp

# BoolOp(boolop op, expr* values)

def BoolOp(self, node):
    """Visit every operand of an and/or chain."""
    for operand in node.values:
        self.handleNode(operand, node)
#@+node:ekr.20160603042613.56: *9* ft.Bytes

def Bytes(self, node):
    """A bytes literal has no child nodes to visit."""
    return None
#@+node:ekr.20160603042613.57: *9* ft.Call

# Call(expr func, expr* args, keyword* keywords, expr? starargs, expr? kwargs)

def Call(self, node):
    """Visit a Call's children in token order, including legacy starargs/kwargs."""
    self.handleNode(node.func, node)
    for child in node.args:
        self.handleNode(child, node)
    for child in node.keywords:
        self.handleNode(child, node)
    # starargs/kwargs exist only on older grammars.
    for attr in ('starargs', 'kwargs'):
        extra = getattr(node, attr, None)
        if extra:
            self.handleNode(extra, node)
#@+node:ekr.20160603042613.58: *9* ft.Compare

# Compare(expr left, cmpop* ops, expr* comparators)

def Compare(self, node):
    """Visit a Compare node's children in token order, skipping bare cmpops."""
    self.handleNode(node.left, node)
    assert len(node.ops) == len(node.comparators)
    for op, comparator in zip(node.ops, node.comparators):
        # Real cmpop nodes (Lt, Eq, ...) have no handler; anything else
        # (a name, etc.) must be visited.
        if not isinstance(op, ast.cmpop):
            self.handleNode(op, node)
        if not isinstance(comparator, ast.cmpop):
            self.handleNode(comparator, node)
#@+node:ekr.20160603042613.59: *9* ft.comprehension

# comprehension (expr target, expr iter, expr* ifs)

def comprehension(self, node):
    """Visit one for-clause of a comprehension: iter first, then target, then ifs."""
    # EKR: visit iter first.
    self.handleNode(node.iter, node)    # An attribute.
    self.handleNode(node.target, node)  # A name.
    for test in node.ifs:
        self.handleNode(test, node)
#@+node:ekr.20160603042613.60: *9* ft.Dict

# Dict(expr* keys, expr* values)

def Dict(self, node):
    """Visit a dict display's keys and values, interleaved in token order."""
    assert len(node.keys) == len(node.values)
    for key, value in zip(node.keys, node.values):
        self.handleNode(key, node)
        self.handleNode(value, node)
#@+node:ekr.20160603042613.61: *9* ft.Ellipsis

def Ellipsis(self, node):
    """An Ellipsis node has no children to visit."""
    return None
#@+node:ekr.20160603042613.62: *9* ft.Expr

# Expr(expr value)

def Expr(self, node):
    """Visit the wrapped expression of an expression statement."""
    self.handleNode(node.value, node)
#@+node:ekr.20160603042613.63: *9* ft.Expression

def Expression(self, node):
    """Visit the body of an inner Expression node (eval-mode root)."""
    self.handleNode(node.body, node)
#@+node:ekr.20160603042613.64: *9* ft.ExtSlice

def ExtSlice(self, node):
    """Visit every dimension of an extended slice."""
    for dim in node.dims:
        self.handleNode(dim, node)
#@+node:ekr.20160603042613.65: *9* ft.ifExp (ternary operator)

# IfExp(expr test, expr body, expr orelse)

def IfExp(self, node):
    """Visit a ternary in token order: 'body if test else orelse'."""
    for child in (node.body, node.test, node.orelse):
        self.handleNode(child, node)
#@+node:ekr.20160603042613.66: *9* ft.Index

def Index(self, node):
    """Visit the expression wrapped by an Index slice."""
    self.handleNode(node.value, node)
#@+node:ekr.20160603042613.67: *9* ft.keyword

# keyword = (identifier arg, expr value)

def keyword(self, node):
    """Visit a keyword argument's value; node.arg is a plain string."""
    self.handleNode(node.value, node)
#@+node:ekr.20160603042613.68: *9* ft.List

# List(expr* elts, expr_context ctx)

def List(self, node):
    """Visit every element of a list display; the ctx is ignored."""
    for element in node.elts:
        self.handleNode(element, node)

#@+node:ekr.20160603042613.69: *9* ft.NameConstant

def NameConstant(self, node): # Python 3 only.
    """
    A NameConstant (True/False/None) has no children; just sanity-check
    the stored value.
    """
    value = node.value
    assert isinstance(value, (bool, str, None.__class__)), value.__class__.__name__
#@+node:ekr.20160603042613.70: *9* ft.Num

def Num(self, node):
    """A numeric literal has no child nodes to visit."""
    return None
#@+node:ekr.20160603042613.71: *9* ft.Repr

# Python 2.x only
# Repr(expr value)

def Repr(self, node):
    """Visit the expression inside Python 2 backticks."""
    self.handleNode(node.value, node)
#@+node:ekr.20160603042613.72: *9* ft.Set (new)

# Set(expr* elts)

def Set(self, node):
    """Visit every element of a set display."""
    for element in node.elts:
        self.handleNode(element, node)
        
#@+node:ekr.20160603042613.73: *9* ft.Slice

def Slice(self, node):
    """Visit whichever of lower/upper/step are present and non-empty."""
    for attr in ('lower', 'upper', 'step'):
        child = getattr(node, attr, None)
        if child:
            self.handleNode(child, node)
#@+node:ekr.20160603042613.74: *9* ft.Str

def Str(self, node):
    """A string literal has no child nodes to visit."""
    return None
#@+node:ekr.20160603042613.75: *9* ft.Subscript

# Subscript(expr value, slice slice, expr_context ctx)

def Subscript(self, node):
    """Visit a subscription: the value first, then its slice; ctx ignored."""
    # EKR: Visit value first.
    for child in (node.value, node.slice):
        self.handleNode(child, node)
#@+node:ekr.20160603042613.76: *9* ft.Tuple

# Tuple(expr* elts, expr_context ctx)

def Tuple(self, node):
    """Visit every element of a tuple display; the ctx is ignored."""
    for element in node.elts:
        self.handleNode(element, node)
#@+node:ekr.20160603042613.77: *9* ft.UnaryOp

# UnaryOp(unaryop op, expr operand)

def UnaryOp(self, node):
    """Visit a unary operator's operand; the op itself has no handler."""
    self.handleNode(node.operand, node)
#@+node:ekr.20160603042613.78: *8* ft.statements
#@+node:ekr.20160603042613.79: *9* ft.Assert

# Assert(expr test, expr? msg)

def Assert(self, node):
    """Visit an assert statement: the test, then the optional message."""
    self.handleNode(node.test, node)
    message = node.msg
    if message:
        self.handleNode(message, node)
#@+node:ekr.20160603042613.80: *9* ft.Assign

# Assign(expr* targets, expr value)

def Assign(self, node):
    """Visit an assignment: the value before any of its targets."""
    # EKR: Visit value first.
    self.handleNode(node.value, node)
    for target in node.targets:
        self.handleNode(target, node)
    
#@+node:ekr.20160603042613.81: *9* ft.Break

def Break(self, node):
    """A break statement has no children to visit."""
    return None
#@+node:ekr.20160603042613.82: *9* ft.Continue

def Continue(self, node):
    """A continue statement has no children to visit."""
    return None
#@+node:ekr.20160603042613.83: *9* ft.Delete

# Delete(expr* targets)

def Delete(self, node):
    """Visit every target of a del statement."""
    for target in node.targets:
        self.handleNode(target, node)
#@+node:ekr.20160603042613.84: *9* ft.Exec

# Python 2.x only
# Exec(expr body, expr? globals, expr? locals)

def Exec(self, node):
    """Visit a Python 2 exec statement: body, then optional globals/locals."""
    self.handleNode(node.body, node)
    for attr in ('globals', 'locals'):
        child = getattr(node, attr, None)
        if child:
            self.handleNode(child, node)
#@+node:ekr.20160603042613.85: *9* ft.For

# For(expr target, expr iter, stmt* body, stmt* orelse)

def For(self, node):
    """Visit a for statement: iter before target, then body and orelse."""
    # EKR: visit iter first.
    self.handleNode(node.iter, node)
    self.handleNode(node.target, node)
    for stmt in node.body:
        self.handleNode(stmt, node)
    for stmt in node.orelse:
        self.handleNode(stmt, node)

AsyncFor = For
#@+node:ekr.20160603042613.86: *9* ft.If

# If(expr test, stmt* body, stmt* orelse)

def If(self, node):
    """Visit an if statement: test (unless a bare operator), body, orelse."""
    if not isinstance(node.test, ast.operator):
        self.handleNode(node.test, node)
    for stmt in node.body:
        self.handleNode(stmt, node)
    for stmt in node.orelse:
        self.handleNode(stmt, node)
#@+node:ekr.20160603042613.87: *9* ft.Pass

def Pass(self, node):
    """A pass statement has no children to visit."""
    return None
#@+node:ekr.20160603042613.88: *9* ft.Print

# Python 2.x only
# Print(expr? dest, expr* values, bool nl)

def Print(self, node):
    """Visit a Python 2 print statement: the optional dest, then each value."""
    dest = getattr(node, 'dest', None)
    if dest:
        self.handleNode(dest, node)
    for value in node.values:
        self.handleNode(value, node)
#@+node:ekr.20160603042613.89: *9* ft.Raise

# Python 2: Raise(expr? type, expr? inst, expr? tback)
# Python 3: Raise(expr? exc, expr? cause)
# NOTE(review): the original comments had the 2/3 labels swapped;
# the code below matches the real grammars.

def Raise(self, node):
    """Visit whichever optional Raise fields the running Python provides."""
    if g.isPython3:
        fields = ('exc', 'cause')
    else:
        fields = ('type', 'inst', 'tback')
    for attr in fields:
        child = getattr(node, attr, None)
        if child:
            self.handleNode(child, node)
#@+node:ekr.20160603042613.90: *9* ft.Starred (Python 3)

# Starred(expr value, expr_context ctx)

def Starred(self, node):
    """Visit the value of a starred expression (Python 3); ctx ignored."""
    self.handleNode(node.value, node)
#@+node:ekr.20160603042613.91: *9* ft.TryFinally

# TryFinally(stmt* body, stmt* finalbody)

def TryFinally(self, node):
    """Visit the body and then the finally clause; missing fields tolerated."""
    for stmt in getattr(node, 'body', []):
        self.handleNode(stmt, node)
    for stmt in getattr(node, 'finalbody', []):
        self.handleNode(stmt, node)

#@+node:ekr.20160603042613.92: *9* ft.While

# While(expr test, stmt* body, stmt* orelse)

def While(self, node):
    """Visit a while statement: test, body, then the orelse clause."""
    self.handleNode(node.test, node) # Bug fix: 2013/03/23.
    for stmt in node.body:
        self.handleNode(stmt, node)
    for stmt in node.orelse:
        self.handleNode(stmt, node)
#@+node:ekr.20160603042613.93: *9* ft.With

# 2:  With(expr context_expr, expr? optional_vars,
#          stmt* body)
# 3:  With(withitem* items,
#          stmt* body)
# withitem = (expr context_expr, expr? optional_vars)

def With(self, node):
    """Visit a with statement in either the Python 2 or Python 3 layout."""
    if getattr(node, 'context_expr', None): # Python 2 layout.
        self.handleNode(node.context_expr, node)
    if getattr(node, 'optional_vars', None): # Python 2 layout.
        self.handleNode(node.optional_vars, node)
    if getattr(node, 'items', None): # Python 3.
        for item in node.items:
            self.handleNode(item.context_expr, node)
            if getattr(item, 'optional_vars', None):
                # optional_vars may be a single node or something iterable.
                try:
                    for z in item.optional_vars:
                        self.handleNode(z, node)
                except TypeError: # Not iterable.
                    self.handleNode(item.optional_vars, node)
    for z in node.body:
        self.handleNode(z, node)

AsyncWith = With
#@+node:ekr.20160603042613.94: *7* Resolve.AugAssign

def AugAssign(self, node):
    """Visit an augmented assignment: the target is both loaded and stored."""
    target = node.target
    self.handleNodeLoad(target)
    self.handleNode(node.value, node)
    self.handleNode(target, node)

#@+node:ekr.20160603042613.95: *7* Resolve.ClassDef

def ClassDef(self, node):
    """
    Check names used in a class definition, including its decorators, base
    classes, and the body of its definition.  Additionally, add its name to
    the current scope.
    """
    for child in node.decorator_list:
        self.handleNode(child, node)
    for child in node.bases:
        self.handleNode(child, node)
    if not PY2:
        # Python 3 metaclass/keyword arguments.
        for child in node.keywords:
            self.handleNode(child, node)
    self.pushScope(node, node.name, ClassScope)
    # EKR: Unlike def's & lambda's, we *do* traverse the class's body.
    for child in node.body:
        self.handleNode(child, node)
    self.popScope()
    self.addBinding(node, ClassDefinition(node.name, node))
    
#@+node:ekr.20160603042613.96: *7* Resolve.ExceptHandler

# Python 2: ExceptHandler(expr? type, expr? name, stmt* body)
# Python 3: ExceptHandler(expr? type, identifier? name, stmt* body)

def ExceptHandler(self, node):
    """Visit an except clause, binding the handler name on Python 3."""
    # 3.x: in addition to handling children, we must handle the name of
    # the exception, which is not a Name node, but a simple string.
    if g.isPython3:
        if isinstance(node.name, str):
            self.handleNodeStore(node)
    elif node.name:
        self.handleNode(node.name, node)
    if node.type:
        self.handleNode(node.type, node)
    for z in node.body:
        self.handleNode(z, node)
#@+node:ekr.20160603042613.97: *7* Resolve.FunctionDef

def FunctionDef(self, node):
    """
    Visit a def: decorators are visited now; the body is deferred to
    pass 2 via the Lambda handler, which only queues it.
    """
    global n_FunctionDef ; n_FunctionDef += 1
    for deco in node.decorator_list:
        self.handleNode(deco, node)
    self.Lambda(node) # EKR: defers traversal of the body!
    self.addBinding(node, FunctionDefinition(node.name, node))

AsyncFunctionDef = FunctionDef
#@+node:ekr.20160603042613.98: *7* Resolve.GeneratorExp & comprehensions

# GeneratorExp(expr elt, comprehension* generators)
# SetComp(expr elt, comprehension* generators)

def GENERATOREXP(self, node):
    """Visit a generator expression / set comprehension in its own scope."""
    # EKR: always push a new scope.
    name = 'Generator: %s' % id(node)
    self.pushScope(node, name, GeneratorScope)
    if aft:
        # EKR: call generators first.
        for z in node.generators:
            self.handleNode(z, node)
        self.handleNode(node.elt, node)
    else:
        self.handleChildren(node)
    self.popScope()

if aft:
    SetComp = GeneratorExp = GENERATOREXP

# DictComp(expr key, expr value, comprehension* generators)

def DictComp(self, node):
    """Visit a dict comprehension in its own generator scope."""
    name = 'Generator: %s' % id(node)
    self.pushScope(node, name, GeneratorScope)
    if aft:
        # EKR: call generators first.
        for z in node.generators:
            self.handleNode(z, node)
        self.handleNode(node.key, node)
        self.handleNode(node.value, node)
    else:
        self.handleChildren(node)
    self.popScope()

def ListComp(self, node):
    """Visit a list comprehension; it gets its own scope only on Python 3."""
    # EKR: Push a new scope only in Python 3.
    name = 'Generator: %s' % id(node)
    if g.isPython3:
        self.pushScope(node, name, GeneratorScope)
    if aft:
        # EKR: call generators first.
        for z in node.generators:
            self.handleNode(z, node)
        self.handleNode(node.elt, node)
    else:
        self.handleChildren(node)
    if g.isPython3:
        self.popScope()

LISTCOMP = handleChildren if PY2 else GENERATOREXP

DICTCOMP = SETCOMP = GENERATOREXP
#@+node:ekr.20160603042613.99: *7* Resolve.Global & Nonlocal

def Global(self, node):
    """
    Keep track of globals declarations.

    Binds each listed name in the module scope and marks it as already
    used in every enclosing non-global scope.
    """
    # In doctests, the global scope is an anonymous function at index 1.
    # global_scope_index = 1 if self.withDoctest else 0
    # global_scope = self.scopeStack[global_scope_index]

    ### Bad
    global_scope_index = 0
    global_scope = self.moduleScope

    # Ignore 'global' statement in global scope.
    if self.scope is not global_scope:

        # One 'global' statement can bind multiple (comma-delimited) names.
        for node_name in node.names:
            node_value = Assignment(node_name, node)

            # Remove UndefinedName messages already reported for this name.
            # NOTE(review): this 'and' chain keeps a message only if it is
            # neither an UndefinedName nor a ReturnOutsideFunction AND is
            # about a different name — i.e. it drops ALL messages of those
            # two classes; upstream pyflakes used 'or'.  Confirm intent.
            self.messages = [
                m for m in self.messages if
                    not isinstance(m, messages.UndefinedName) and
                    not isinstance(m, messages.ReturnOutsideFunction) and
                        # EKR: Real bug fix.
                    m.message_args[0] != node_name]

            # Bind name to global scope if it doesn't exist already.
            global_scope.setdefault(node_name, node_value)

            # Bind name to non-global scopes, but as already "used".
            node_value.used = (global_scope, node)
            for scope in self.scopeStack[global_scope_index + 1:]:
                scope[node_name] = node_value

Nonlocal = Global
#@+node:ekr.20160603042613.100: *7* Resolve.Import

def Import(self, node):
    """Create an Importation binding for each name this import introduces."""
    for alias in node.names:
        bound_name = alias.asname or alias.name
        self.addBinding(node, Importation(bound_name, node))

#@+node:ekr.20160603042613.101: *7* Resolve.ImportFrom

def ImportFrom(self, node):
    """
    Visit a from-import: track __future__ legality and star imports,
    then bind each imported name.
    """
    if node.module == '__future__':
        if not self.futuresAllowed:
            self.report(messages.LateFutureImport,
                        node, [n.name for n in node.names])
    else:
        self.futuresAllowed = False

    for alias in node.names:
        if alias.name == '*':
            self.scope.importStarred = True
            self.report(messages.ImportStarUsed, node, node.module)
            continue
        name = alias.asname or alias.name
        importation = Importation(name, node)
        if node.module == '__future__':
            # Future imports are implicitly used.
            importation.used = (self.scope, node)
        self.addBinding(node, importation)
#@+node:ekr.20160603042613.102: *7* Resolve.Lambda

def Lambda(self, node):
    """
    Pass-1 handler for lambdas (and, via FunctionDef, defs): visit only
    annotations and defaults now; queue the body for pass 2.
    """
    # Pass 1: visit *only* annotations and defaults.
    annotations, args, defaults = self.get_function_args(node)
    for child in annotations + defaults:
        if child:
            self.handleNode(child, node)
    # Queue pass 2: it must handle args
    ### To do: calculate args separately in pass 2.
    ### To do: don't pass scopeStack
    ### To be replaced...
    self.defs_list.append(
        g.Bunch(node=node,
                args=args,
                scopeStack=self.scopeStack[:],
        ))
#@+node:ekr.20160603042613.103: *8* Resolve.get_function_args

def get_function_args(self, node):
    """
    Return (annotations, args, defaults) for a def or lambda node,
    reporting duplicate argument names along the way.
    """
    annotations, args = [], []

    if PY2:
        def addArgs(arglist):
            # Python 2 permits nested tuple arguments.
            for arg in arglist:
                if isinstance(arg, ast.Tuple):
                    addArgs(arg.elts)
                else:
                    args.append(arg.id)
        addArgs(node.args.args)
        defaults = node.args.defaults
    else:
        for arg in node.args.args + node.args.kwonlyargs:
            args.append(arg.arg)
            annotations.append(arg.annotation)
        defaults = node.args.defaults + node.args.kw_defaults

    # Only for Python3 FunctionDefs
    is_py3_func = hasattr(node, 'returns')

    for arg_name in ('vararg', 'kwarg'):
        wildcard = getattr(node.args, arg_name)
        if not wildcard:
            continue
        args.append(wildcard if PY33 else wildcard.arg)
        if is_py3_func:
            if PY33:  # Python 2.5 to 3.3
                argannotation = arg_name + 'annotation'
                annotations.append(getattr(node.args, argannotation))
            else:     # Python >= 3.4
                annotations.append(wildcard.annotation)

    if is_py3_func:
        annotations.append(node.returns)

    # Report any argument name that appears more than once.
    if len(set(args)) < len(args):
        for (idx, arg) in enumerate(args):
            if arg in args[:idx]:
                self.report(messages.DuplicateArgument, node, arg)

    return annotations, args, defaults
#@+node:ekr.20160603042613.104: *7* Resolve.Module

def Module(self, node):
    """
    Visit a Module in four passes: top-level symbols, deferred
    def/lambda bodies, deferred assignment checks, then dead scopes.
    """
    global stats
    self.scopeStack = []
    self.pushScope(node, self.filename, ModuleScope)
    self.moduleScope = self.scopeStack[-1]
    self.pass1(node)
        # Traverse all top-level symbols.
    self.pass2(node)
        # Traverse all def/lambda bodies.
    self.pass3(node)
        # Check deferred assignments.
    ### del self.scopeStack[1:]
    self.scopeStack = []
    self.scope = None
    self.deadScopes.append(self.moduleScope)
    self.checkDeadScopes()
        # Pass 4.
#@+node:ekr.20160603042613.105: *8* Resolve.pass1

def pass1(self, node):
    """Pass 1: traverse all top-level statements of the module."""
    global stats
    t1 = time.clock()
        # NOTE(review): time.clock() was removed in Python 3.8;
        # this attic code predates that.
    self.pass_n = 1
    # This looks like it is a full pass.
    # In fact, traversing of def/lambda happens in pass 2.
    for z in node.body:
        self.handleNode(z, node)
    t2 = time.clock()
    stats['pass1'] = stats.get('pass1', 0.0) + t2-t1
#@+node:ekr.20160603042613.106: *8* Resolve.pass2 (uses scopeStack)

def pass2(self, node):
    """Pass 2: traverse the bodies of all deferred def's & lambda's."""
    global stats
    t1 = time.clock()
    # Traverse the bodies of all def's & lambda's.
    self.pass_n = 2
    for bunch in self.defs_list:
        self.scanFunction(bunch.node, bunch.args, bunch.scopeStack)
            # The full scopeStack *is* needed, but we could recreate it...
    t2 = time.clock()
    stats['pass2'] = stats.get('pass2', 0.0) + t2-t1
#@+node:ekr.20160603042613.107: *9* Resolve.scanFunction (NEW)
def scanFunction(self, node, args, scopeStack):
    """Pass 2: traverse one deferred def/lambda body in a new FunctionScope."""
    # This was in runFunction...
    assert self.pass_n == 2, self.pass_n
    assert isinstance(node, (ast.FunctionDef, ast.Lambda)), node
    is_def = isinstance(node, ast.FunctionDef)
    name = ('def: %s' % node.name) if is_def else ('Lambda: %s' % id(node))
    # Restore the scope stack captured when the def was queued.
    self.scopeStack = scopeStack
    self.pushScope(node, name, FunctionScope)
    for arg_name in args:
        self.addBinding(node, Argument(arg_name, node))
    # *Now* traverse the body of the def/lambda.
    if is_def:
        for stmt in node.body:
            self.handleNode(stmt, node)
    else:
        # A lambda body is a single expression.
        self.handleNode(node.body, node)
    # Defer checking assignments until pass 3.
    self.check_assign_list.append(self.scopeStack[-1])
    self.popScope()
#@+node:ekr.20160603042613.108: *8* Resolve.pass3

def pass3(self, node):
    """Pass 3: check deferred assignments in all queued function scopes."""
    global stats
    t1 = time.clock()
    self.pass_n = 3
        # handleNode will raise an exception if it is called.
    for scope in self.check_assign_list:
        self.checkAssignments(scope)
    t2 = time.clock()
    stats['pass3'] = stats.get('pass3', 0.0) + t2-t1
#@+node:ekr.20160603042613.109: *9* Resolve.checkAssignments (NEW)
def checkAssignments(self, scope):
    """
    Check to see if any assignments have not been used.
    Also reports 'return with args' inside a generator (Python < 3.3).
    """
    assert self.pass_n == 3, self.pass_n
    for name, binding in scope.unusedAssignments(): # was self.scope.
        self.report(messages.UnusedVariable, binding.source, name)

    if PY32:
        if scope.isGenerator and scope.returnValue: # was self.scope
            self.report(messages.ReturnWithArgsInsideGenerator,
                        scope.returnValue)
#@+node:ekr.20160603042613.110: *8* Resolve.checkDeadScopes (pass4)

def checkDeadScopes(self):
    """
    Look at scopes which have been fully examined and report names in them
    which were imported but unused.  (Pass 4.)
    """
    global stats
    t1 = time.clock()
    self.pass_n = 4
        # This will raise an exception in handleNode if any nodes are visited.
    for scope in self.deadScopes:
        if isinstance(scope.get('__all__'), ExportBinding):
            all_names = set(scope['__all__'].names)
            if not scope.importStarred and \
               os.path.basename(self.filename) != '__init__.py':
                # Look for possible mistakes in the export list
                undefined = all_names.difference(scope)
                for name in undefined:
                    self.report(messages.UndefinedExport,
                                scope['__all__'].source, name)
        else:
            all_names = []
        # Look for imported names that aren't used.
        for value in scope.values():
            if isinstance(value, Importation):
                # An __all__ entry counts as a use.
                used = value.used or value.name in all_names
                if not used:
                    messg = messages.UnusedImport
                    self.report(messg, value.source, value.name)
                for node in value.redefined:
                    if isinstance(self.getParent(node), ast.For):
                        messg = messages.ImportShadowedByLoopVar
                    elif used:
                        continue
                    else:
                        messg = messages.RedefinedWhileUnused
                    self.report(messg, node, value.name, value.source)
    t2 = time.clock()
    stats['pass4'] = stats.get('pass4', 0.0) + t2-t1
#@+node:ekr.20160603042613.111: *7* Resolve.Name & helpers

def Name(self, node):
    """
    Handle a Name node, dispatching on its expression context
    (load, store, or delete).
    """
    ctx = node.ctx
    if isinstance(ctx, (ast.Load, ast.AugLoad)):
        self.handleNodeLoad(node)
        is_locals_call = (
            node.id == 'locals' and
            isinstance(self.scope, FunctionScope) and
            isinstance(node.parent, ast.Call))
        if is_locals_call:
            # A locals() call in the current function scope.
            self.scope.usesLocals = True
                # EKR: why does this matter???
    elif isinstance(ctx, (ast.Store, ast.AugStore)):
        self.handleNodeStore(node)
    elif isinstance(ctx, ast.Del):
        self.handleNodeDelete(node)
    else:
        # A Param context can only occur for names in function arguments,
        # and those are never dispatched through here.
        raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
#@+node:ekr.20160603042613.112: *8* Resolve.handleNodeDelete

# EKR: ctx is Del.
def handleNodeDelete(self, node):
    '''Handle a Del-context name: remove its binding or report UndefinedName.'''

    def inside_conditional():
        '''Return True if node sits anywhere inside an if/while/ifexp body.'''
        parent = getattr(node, 'parent', None)
        while parent:
            if isinstance(parent, (ast.If, ast.While, ast.IfExp)):
                return True
            parent = getattr(parent, 'parent', None)
        return False

    name = getNodeName(node)
    if not name:
        return
    if inside_conditional():
        # Whether this branch executes is unknowable statically,
        # so report nothing.
        return
    if isinstance(self.scope, FunctionScope) and name in self.scope.globals:
        self.scope.globals.remove(name)
    else:
        try:
            del self.scope[name]
        except KeyError:
            self.report(messages.UndefinedName, node, name)
#@+node:ekr.20160603042613.113: *8* Resolve.handleNodeLoad

def handleNodeLoad(self, node):
    '''
    Resolve a name in Load/AugLoad context: mark the binding used in the
    innermost scope that defines it, or report UndefinedName.
    '''
    global n_load, test_scope
    trace = False and test_scope == 'test'
    # EKR: ctx is Load or AugLoad.
    name = getNodeName(node)
    if not name:
        return
    # try local scope
    try:
        self.scope[name].used = (self.scope, node)
    except KeyError:
        pass
    else:
        # EKR: the name is in the scope,
        # scope[name] is a Binding, and we have just marked it used.
        if trace: g.trace('pass: %s %40s in %s' % (
            # self.pass_n, name, repr(self.scope)))
            self.pass_n, self.scope[name], repr(self.scope)))
        return

    # EKR: Create a list of previous defining scopes.
    # Class scopes are deliberately excluded: names in an enclosing
    # class body are not visible from nested functions.
    n_load += 1
    defining_scopes = (FunctionScope, ModuleScope, GeneratorScope) # EKR
    scopes = [scope for scope in self.scopeStack[:-1]
        if isinstance(scope, defining_scopes)]

    if isinstance(self.scope, GeneratorScope) and scopes[-1] != self.scopeStack[-2]:
        scopes.append(self.scopeStack[-2])

    # try enclosing function scopes and global scope
    importStarred = self.scope.importStarred
    for scope in reversed(scopes):
        importStarred = importStarred or scope.importStarred
        try:
            scope[name].used = (self.scope, node)
        except KeyError:
            pass
        else:
            if trace: g.trace('pass: %s %40s in %s' % (
            self.pass_n, scope[name], repr(scope)))
            return

    # look in the built-ins
    if importStarred or name in self.builtIns:
        return
    if name == '__path__' and os.path.basename(self.filename) == '__init__.py':
        # the special name __path__ is valid only in packages
        return

    # protected with a NameError handler?
    if 'NameError' not in self.exceptHandlers[-1]:
        self.report(messages.UndefinedName, node, name)
#@+node:ekr.20160603042613.114: *8* Resolve.handleNodeStore & helper

# EKR: called by Name and ExceptHandler.
# EKR: ctx is Store or AugStore.

def handleNodeStore(self, node):
    '''
    Resolve a name in Store/AugStore context: warn about probable
    undefined-local mistakes, then add the appropriate Binding.
    '''
    global n_store, test_scope
    trace = False and test_scope == 'test'
    name = getNodeName(node)
    if not name:
        return
    # if the name hasn't already been defined in the current scope
    if isinstance(self.scope, FunctionScope) and name not in self.scope:
        # for each function or module scope above us
        n_store += 1
        for scope in self.scopeStack[:-1]:
            if not isinstance(scope, (FunctionScope, ModuleScope)):
                continue
            # if the name was defined in that scope, and the name has
            # been accessed already in the current scope, and hasn't
            # been declared global
            used = name in scope and scope[name].used
            if trace: g.trace(name, 'used:', used, scope.name,
                'isGlobal', name  in self.scope.globals)
            if used and used[0] is self.scope and name not in self.scope.globals:
                # then it's probably a mistake
                self.report(messages.UndefinedLocal,
                            scope[name].used[1], name, scope[name].source)
                break

    # Choose the Binding subclass from the syntactic context of the store.
    parent_stmt = self.getParent(node)
    if (isinstance(parent_stmt, (ast.For, ast.comprehension)) or
        (parent_stmt != node.parent and not self.isLiteralTupleUnpacking(parent_stmt))
    ):
        binding = Binding(name, node)
    elif name == '__all__' and isinstance(self.scope, ModuleScope):
        binding = ExportBinding(name, node.parent, self.scope)
    else:
        binding = Assignment(name, node)
    self.addBinding(node, binding)
#@+node:ekr.20160603042613.115: *9* Resolve.isLiteralTupleUnpacking

def isLiteralTupleUnpacking(self, node):
    '''
    Return True if node is an assignment whose value and every target are
    literal tuple/list displays (nodes with an `elts` field), e.g.
    ``a, b = 1, 2``.  Return False otherwise.

    Fix: the original fell off the end (returning None) for non-Assign
    nodes; callers only test truthiness, so returning an explicit False
    is a compatible cleanup.
    '''
    if not isinstance(node, ast.Assign):
        return False
    return all(hasattr(child, 'elts') for child in node.targets + [node.value])
#@+node:ekr.20160603042613.116: *7* Resolve.Return

def Return(self, node):
    '''Check a return statement, reporting returns outside any function.'''
    scope = self.scope
    # if isinstance(scope, ClassScope):
    if isinstance(scope, (ClassScope, ModuleScope)):
        # EKR: A real bug fix: module-level returns are errors too.
        self.report(messages.ReturnOutsideFunction, node)
        return
    value = node.value
    # Remember the first returned value for generator checking.
    if value and hasattr(scope, 'returnValue') and not scope.returnValue:
        scope.returnValue = value
    if value: # EKR
        self.handleNode(value, node)
#@+node:ekr.20160603042613.117: *7* Resolve.Try & TryExcept

def Try(self, node):
    '''Process a try statement, tracking the names of its except handlers.'''
    # Collect the exception names handled by the except clauses.
    handler_names = []
    for handler in node.handlers:
        if isinstance(handler.type, ast.Tuple):
            handler_names.extend(
                getNodeName(exc_type) for exc_type in handler.type.elts)
        elif handler.type:
            handler_names.append(getNodeName(handler.type))
    # Visit the body with these handler names in effect.
    self.exceptHandlers.append(handler_names)
    for child in node.body:
        self.handleNode(child, node)
    self.exceptHandlers.pop()
    # Visit "except:", "else:" and "finally:" without the handler names.
    for clause in (node.handlers, node.orelse, node.finalbody):
        for child in clause:
            self.handleNode(child, node)

TryExcept = Try
#@+node:ekr.20160603042613.118: *7* Resolve.Yield & YieldFrom and Await

def Yield(self, node):
    '''Mark the enclosing scope as a generator, then visit the yielded value.'''
    scope = self.scope
    scope.isGenerator = True
    self.handleNode(node.value, node)

# Await and YieldFrom get identical treatment.
Await = YieldFrom = Yield
#@+node:ekr.20160518071043.1: *4* run (pylint-leo.py) (with Sherlock args)
# This was used to trace pylint, without success.

def run(theDir,fn,silent,rpython=False):
    '''Run pylint on fn.'''
    # NOTE(review): `scope` and `g` are module-level names defined elsewhere
    # in the original script; time.clock was removed in Python 3.8.
    trace = False and not g.unitTesting
    # A little hack. theDir is empty for the -f option.
    if theDir:
        fn = os.path.join('leo',theDir,fn)
    rc_fn = os.path.abspath(os.path.join('leo','test','pylint-leo-rc.txt'))
    fn = os.path.abspath(fn)
    if not fn.endswith('.py'):
        fn = fn+'.py'
    if not os.path.exists(rc_fn):
        print('pylint rc file not found: %s' % (rc_fn))
        return 0.0
    if not os.path.exists(fn):
        print('file not found: %s' % (fn))
        return 0.0
    # Report the file name and one level of directory.
    path = g.os_path_dirname(fn)
    dirs = path.split(os.sep)
    theDir = dirs and dirs[-1] or ''
    if not silent:
        print('pylint-leo.py: %s%s%s' % (theDir,os.sep,g.shortFileName(fn)))
    # Create the required args.
    args = ','.join([
        "fn=r'%s'" % (fn),
        "rc=r'%s'" % (rc_fn),
    ])
    if scope == 'stc-test': # The --tt option.
        # Report that Sherlock is enabled.
        print('pylint-leo.py --tt: enabling Sherlock traces')
        print('pylint-leo.py --tt: patterns contained in plyint-leo.py')
        # Report the source code.
        s = open(fn).read()
        print('pylint-leo.py: source:\n\n%s\n' % s)
        # Add the optional Sherlock args.
        dots = True
        patterns = [
            << Sherlock patterns for pylint >>
        ]
        show_return = True
        stats_patterns = [
            << Sherlock stats patterns for pylint >>
        ]
        verbose = True
        args = args + ',' + ','.join([
            'dots=%s' % (dots),
            'patterns=%s' % (patterns),
            'sherlock=True',
            'show_return=%s' % (show_return),
            'stats_patterns=%s' % (stats_patterns),
            'verbose=%s' % (verbose),
        ])

    # Execute the command in a separate process.
    command = '%s -c "import leo.core.leoGlobals as g; g.run_pylint(%s)"' % (
        sys.executable, args)
    t1 = time.clock()
    g.execute_shell_commands(command)
    t2 = time.clock()
    if trace:
        g.trace('%4.2f %s' % (t2-t1, g.shortFileName(fn)))
#@+node:ekr.20160518071043.2: *5* << Sherlock patterns for pylint >>
@nobeautify

# Note:  A leading * is never valid: change to .*

'+.*infer*',
    # '+.*infer_name',
    '+.*infer_stmts',

'+YES::__init__',

# '+TypeChecker::add_message',
    # '+.*add_message',
    # '+PyLinter::add_message',
    # '+TextReporter::add_message'

# '+.*visit*',
    # '+TypeChecker::visit_getattr',
    # '+.*visit_class',
    # '+Basic*::visit_*',

# '+.*__init__',
    # '+Instance::__init__',
    # '+Class::__init__',
    # '+Module::__init__',
    # '+Function::__init__',

# '+:.*typecheck.py',
# '+:.*inference.py',
# '+:.*variables.py',

# Old traces

# '+:.*bases.py',
# '+.*path_raise_wrapper',

# Enable everything.
# # '+.*',

# # Disable entire files.
# # '-:.*\\lib\\.*', # Disables everything.

# # Pylint files.
# #'-:.*base.py',
# #'-:.*bases.py',
# '-:.*builder.py',
# '-:.*__init__.py',
# '-:.*format.py',
# '-:.*interface.py', # implements
# '-:.*rebuilder.py',
# #'-:.*scoped_nodes',
# # General library files.
# '-:.*leoGlobals.py',
# '-:.*codecs.py',
# '-:.*config.py',
# '-:.*configuration.py',
# '-:.*ConfigParser.py',
# '-:.*copy\.py',
# '-:.*gettext.py',
# '-:.*genericpath.py',
# '-:.*graph.py',
# '-:.*locale.py',
# '-:.*optik_ext.py',
# '-:.*optparse.py',
# '-:.*os.py',
# '-:.*ntpath.py',
# '-:.*pickle.py',
# '-:.*re.py',
# '-:.*similar.py',
# '-:.*shlex.py',
# '-:.*sre_compile.py',
# '-:.*sre_parse.py',
# '-:.*string_escape.py',
# '-:.*text.py',
# '-:.*threading.py',
# '-:.*tokenize.py',
# '-:.*utils.py',

# # Enable entire files.
# # '+:.*base.py',
# # '+:.*bases.py',
# # '+:.*classes.py',
# # '+:.*design_analysis.py',
# # '+:.*format.py',
# # '+:.*inference.py',
# # '+:.*logging.py',
# # '+:.*mixins.py',
# # '+:.*newstyle.py',
# # '+:.*node_classes.py',
# # '+:.*protocols.py',
# # '+:.*scoped_nodes.py',
# # '+:.*typecheck.py',
# # '+:.*variables.py',

# # Disable individual methods.
# '-close', # __init__.py
# '-collect_block_lines', '-\<genexpr\>','-.*option.*','-.*register_checker','-set_reporter', # lint.py
# '-frame','-root','-scope', # scoped_nodes
# '-register', # various files.

# # '-abspath','-normpath','-isstring','-normalize',
# # '-splitext','-_splitext','-splitdrive','-splitstrip',
# # '-.*option.*','-get','-set_option',
# # '-unquote','-convert','-interpolate','-_call_validator', # compile stuff.
# # '-_compile.*','-compile_.*','-_code','-identifyfunction', # compile stuff.
# # '-_parse.*','-set_parser','-set_conflict_handler',
# # '-append','-match',
# # '-isbasestring',
# # '-save.*','-memoize','-put',

# # '-persistent_id',
# # '-__next',
# # '-nodes_of_class',
# # '-__.*',
# # '-_check.*',
# # '-_.*',
# # '-load_.*',
#@+node:ekr.20160518071043.3: *5* << Sherlock stats patterns for pylint >>
@nobeautify

# '+.*__init__',
    # astroid.bases.py
    '+BoundMethod::__init__',
    '+InferenceContext::__init__',
    '+Instance::__init__',
    '+UnboundMethod::__init__',
    # astroid.node_classes.py
    '+Arguments::__init__',
    '+CallFunc::__init__',
    '+Const::__init__',
    # astroid.scoped_nodes.py
    '+Class::__init__',
    '+Function::__init__',
    '+Module::__init__',
#@+node:ekr.20170116050723.1: *3* Syntax coloring...
#@+node:ekr.20170128133257.1: *4* OLD class PythonQSyntaxHighlighter
class PythonQSyntaxHighlighter(object):
    '''
    Python implementation of QtGui.QSyntaxHighlighter.

    This allows incremental coloring of text at idle time, trading slower
    overall speed for much faster response time.

    The method bodies live in child outline nodes, spliced in by @others.
    '''
    @others
#@+node:ekr.20170128133257.2: *5* pqsh.Birth & death
def __init__(self, parent, c=None, delay=10, limit=50):
    '''
    Ctor for QSyntaxHighlighter class.
    Parent is a QTextDocument or QTextEdit: it becomes the owner of the QSyntaxHighlighter.
    delay is the idle-timer wait in msec; limit is the maximum number of
    lines colored per idle callback.
    '''
    # g.trace('(PythonQSyntaxBrowser)', parent)
    # Ivars corresponding to QSH ivars...
    self.c = c # The commander.
    self.cb = None # The current block: a QTextBlock.
    self.d = None # The QTextDocument attached to this colorizer.
    self.formats = [] # An array of QTextLayout.FormatRange objects.
    self.inReformatBlocks = False # Reentrancy guard for reformatBlocks.
    self.rehighlightPending = False
    # Ivars for reformatBlocks and idle_handler...
    self.idle_active = False # True if the idle_handler should colorize.
    self.r_block = None # The block to be colorized.
    self.r_end = None # The ultimate ending position.
    self.r_delay = delay # The waiting time, in msec. for self.timer.
    self.r_force = False # True if the next block must be recolored.
    self.r_limit = limit # The max number of lines to color at one time.
    self.timer = g.IdleTime(
        handler=self.idle_handler,
        delay=self.r_delay,
        tag='pqsh.idle_handler')
    # Attach the parent's QTextDocument and set self.d.
    self.setDocument(parent)
#@+node:ekr.20170128133257.3: *5* pqsh.Entry points
#@+node:ekr.20170128133257.4: *6* pqsh.kill
def kill(self):
    '''Cancel any queued idle-time coloring.'''
    trace = False and not g.unitTesting
    if not self.idle_active:
        return
    if trace: g.trace('(PythonQSyntaxHighlighter)')
    self.idle_active = False
    timer = self.timer
    if timer:
        timer.stop()
#@+node:ekr.20170128133257.5: *6* pqsh.rehighlight
def rehighlight(self):
    '''Color the whole document, deferring big documents to a timer.'''
    trace = False and not g.unitTesting
    c, d = self.c, self.d
    if d:
        n = d.characterCount()
        if 0 < c.max_pre_loaded_body_chars < n:
            # Too large to color at all.
            if trace: g.trace('big text: no color', c.p.h)
        elif n > 1000*10:
            # Big text: color later, and only if the node is still selected.

            def rehightlight_callback(c=c, d=d, p=c.p, self=self):
                if p == c.p:
                    if trace: g.trace('=====', n, p.h)
                    cursor = QtGui.QTextCursor(d)
                    self.rehighlight_helper(cursor, QtGui.QTextCursor.End)
                else:
                    if trace: g.trace('node not selected', p.h)

            QtCore.QTimer.singleShot(200, rehightlight_callback)
        else:
            # Small text: color it now.
            if trace: g.trace('(pqsh)', n)
            cursor = QtGui.QTextCursor(d)
            self.rehighlight_helper(cursor, QtGui.QTextCursor.End)
#@+node:ekr.20170128133257.6: *6* pqsh.rehighlightBlock (not used)
def rehighlightBlock(self, block):
    '''Reapplies the highlighting to the given QTextBlock block.'''
    d = self.d
    if d and self.is_valid(block) and block.document() == d:
        # NOTE(review): the pending flag is copied from the *document* and
        # restored afterwards -- presumably to preserve d's flag across the
        # helper call; confirm against setDocument, which sets it on d.
        self.rehighlightPending = d.rehighlightPending
        cursor = QtGui.QTextCursor(block)
        g.trace(g.u(block.text()))
        self.rehighlight_helper(cursor, QtGui.QTextCursor.EndOfBlock)
        if self.rehighlightPending:
            d.rehighlightPending = self.rehighlightPending
#@+node:ekr.20170128133257.7: *6* pqsh.rehighlight_helper
def rehighlight_helper(self, cursor, operation):
    '''
    Reformat all blocks between the cursor's position and the position
    reached by the given move operation, suppressing reentrant calls.
    '''
    # cursor: QtGui.QTextCursor. operation: QtGui.QTextCursor.MoveOperation.
    self.inReformatBlocks = True
    try:
        cursor.beginEditBlock()
        start = cursor.position()
        cursor.movePosition(operation)
        self.reformatBlocks(start, 0, cursor.position() - start)
        cursor.endEditBlock()
    finally:
        self.inReformatBlocks = False
#@+node:ekr.20170128133257.8: *5* pqsh.Getters & Setters
#@+node:ekr.20170128133257.9: *6* pqsh.currentBlock & currentBlockUserData
def currentBlock(self):
    '''Return the text block currently being highlighted.'''
    return self.cb

def currentBlockUserData(self):
    '''Return the QTextBlockUserData attached to the current block, if any.'''
    if self.is_valid(self.cb):
        return self.cb.userData()
    return None
#@+node:ekr.20170128133257.10: *6* pqsh.currentBlockState & previousBlockState
def currentBlockState(self):
    '''Return the user state of the current block, or -1 if none.'''
    if self.is_valid(self.cb):
        return self.cb.userState()
    return -1

def previousBlockState(self):
    '''Return the user state of the block before the current one, or -1.'''
    if not self.is_valid(self.cb):
        return -1
    previous = self.cb.previous()
    if self.is_valid(previous):
        return previous.userState()
    return -1
#@+node:ekr.20170128133257.11: *6* pqsh.document
def document(self):
    '''Return the QTextDocument this highlighter is installed on.'''
    return self.d
#@+node:ekr.20170128133257.12: *6* pqsh.format
def format(self, pos):
    '''Return the stored format at pos, or a default QTextCharFormat.'''
    changes = self.formatChanges
    if 0 <= pos < len(changes):
        return changes[pos]
    return QtGui.QTextCharFormat()
#@+node:ekr.20170128133257.13: *6* pqsh.setCurrentBlockState & setCurrentBlockUserData
def setCurrentBlockState(self, newState):
    '''Set the user state of the current text block, if it is valid.'''
    cb = self.cb
    if self.is_valid(cb):
        cb.setUserState(newState)

def setCurrentBlockUserData(self, data):
    '''Attach user data to the current text block, if it is valid.'''
    cb = self.cb
    if self.is_valid(cb):
        cb.setUserData(data)
#@+node:ekr.20170128133257.14: *6* pqsh.setDocument
def setDocument(self, parent):
    '''Install self on the given QTextDocument.'''
    d = self.d
    if d:
        # Detach from the old document: clear its formats and its signal.
        d.contentsChange.disconnect()
        cursor = QtGui.QTextCursor(d)
        cursor.beginEditBlock()
        blk = d.begin()
        while self.is_valid(blk): # blk: QTextBlock
            blk.layout().clearAdditionalFormats()
            blk = blk.next()
        cursor.endEditBlock()
    self.d = d = parent.document()
    assert isinstance(d, QtGui.QTextDocument), d
    if d:
        # Attach to the new document and queue a full rehighlight.
        d.contentsChange.connect(self.q_reformatBlocks)
        d.rehighlightPending = True
            # Set d's pending flag.
        QtCore.QTimer.singleShot(0, self.delayedRehighlight)
#@+node:ekr.20170128133257.15: *6* pqsh.setFormat (start,count,format)
def setFormat(self, start, count, format):
    '''Remember the requested formatting.

    Called by subclass highlightBlock implementations; the accumulated
    FormatRange objects are applied later by applyFormatChanges.
    '''
    trace = False and not g.unitTesting
    verbose = False
    if start >= 0:
        r = QtGui.QTextLayout.FormatRange()
        r.start, r.length, r.format = start, count, format
        self.formats.append(r)
        if trace and verbose: g.trace('%3s %3s %s %s' % (
            start, count, self.format_to_color(format), self.cb.text()))
    elif trace:
        g.trace('bad start value', repr(start), g.callers())
# Not used by Leo...
# def setFormat(self,start,count,color):
    # format = QTextCharFormat()
    # format.setForeground(color)
    # setFormat(start,count,format)
# def setFormat(self,start,count,font):
    # format = QTextCharFormat()
    # format.setFont(font)
    # self.setFormat(start,count,format)
#@+node:ekr.20170128133257.16: *5* pqsh.Helpers
# These helpers are the main reason QSyntaxHighlighter exists.
# Getting this code exactly right is the main challenge for PythonQSyntaxHighlighter.
#@+node:ekr.20170128133257.17: *6* pqsh.applyFormatChanges
def applyFormatChanges(self):
    '''Install self.formats on the current block's layout and mark it dirty.'''
    if not self.formats:
        return
    block = self.cb
    block.layout().setAdditionalFormats(self.formats)
    self.formats = []
    self.d.markContentsDirty(block.position(), block.length())
#@+node:ekr.20170128133257.18: *6* pqsh.delayedRehighlight
def delayedRehighlight(self): # inline
    '''Run a queued rehighlight, if one is pending.'''
    if not self.rehighlightPending:
        return
    self.rehighlightPending = False
    self.rehighlight()
#@+node:ekr.20170128133257.19: *6* pqsh.format_to_color
def format_to_color(self, format):
    '''Return the name of the given character format's foreground color.'''
    color = format.foreground().color()
    return str(color.name())
#@+node:ekr.20170128133257.20: *6* pqsh.highlightBlock
def highlightBlock(self, s):
    '''Color one block of text. Subclasses must override this.'''
    g.trace('must be defined in subclasses.')
#@+node:ekr.20170128133257.21: *6* pqsh.idle_handler
def idle_handler(self, timer):
    '''Continue coloring at idle time, provided the node has not changed.'''
    trace = False and not g.unitTesting
    verbose = True
    if not self.idle_active:
        # Shortcut everything else.
        return
    c = self.c
    if trace and verbose:
        s = g.u(self.r_block.text()).lstrip()
        g.trace('force: %5s s: %s' % (self.r_force, s[: 20]))
    # This is defensive code.  Apparently it is never needed.
    # This is the only place c is used, so the c argument to the ctor is optional.
    if c:
        if c.p == self.r_p:
            self.reformat_blocks_helper()
        else:
            # Bug fix: always deactivate on a node switch.  Previously this
            # assignment was guarded by `elif trace:`, so with tracing off
            # the idle handler stayed active (and kept firing) forever.
            self.idle_active = False
            if trace:
                g.trace('node changed: old: %s new: %s' % (
                    self.r_p and self.r_p.h[: 10], c.p and c.p.h[: 10]))
#@+node:ekr.20170128133257.22: *6* pqsh.is_valid
def is_valid(self, obj):
    '''Return a truthy value only if obj exists and reports itself valid.'''
    if not obj:
        return obj
    return obj.isValid()
#@+node:ekr.20170128133257.23: *6* pqsh.q_reformatBlocks
def q_reformatBlocks(self, from_, charsRemoved, charsAdded):
    '''Respond to the document's contentsChange signal unless reentrant.'''
    if self.inReformatBlocks:
        return
    self.reformatBlocks(from_, charsRemoved, charsAdded)
#@+node:ekr.20170128133257.24: *6* pqsh.reformat_blocks_helper
def reformat_blocks_helper(self):
    '''The common code shared by reformatBlocks and idle_handler.

    Color up to self.r_limit blocks now; if more remain, restart the idle
    timer so coloring resumes later.
    '''
    block = self.r_block
    n, start = 0, False
    while self.is_valid(block) and (block.position() < self.r_end or self.r_force):
        n += 1
        if n >= self.r_limit > 0 and self.timer:
            # Limit reached: resume at idle time.
            start = True
            break
        else:
            before_state = block.userState()
            self.reformatBlock(block)
            # Force recoloring of the next block if this block's state changed.
            self.r_force = block.userState() != before_state
            block = self.r_block = block.next()
    self.formatChanges = []
    self.idle_active = start
    if self.timer and start:
        self.timer.start()
    elif self.timer:
        self.timer.stop()
        # g.trace('--end',g.app.allow_see,self.c.p and self.c.p.h or None)
        # Fix bug 78: find-next match not always scrolled into view.
        # https://github.com/leo-editor/leo-editor/issues/78
        w = self.c.frame.body.wrapper
        if g.app.allow_delayed_see and w:
            w.seeInsertPoint()
        g.app.allow_delayed_see = False
#@+node:ekr.20170128133257.25: *6* pqsh.reformatBlock
def reformatBlock(self, block):
    '''Recolor a single block: reset its formats, then run highlightBlock.'''
    trace = False and not g.unitTesting
    if self.is_valid(self.cb) and not isQt5:
        g.trace('can not happen: called recursively')
    else:
        self.cb = block
        # Pre-fill with one default FormatRange per character.
        self.formats = []
        for i in range(block.length()):
            r = QtGui.QTextLayout.FormatRange()
            r.start, r.length, r.format = i, 1, QtGui.QTextCharFormat()
            self.formats.append(r)
        if trace: g.trace(str(block.text()))
        self.highlightBlock(block.text())
        self.applyFormatChanges()
        self.cb = QtGui.QTextBlock()
            # An invalid block, meaning "no current block".
#@+node:ekr.20170128133257.26: *6* pqsh.reformatBlocks (main line)
def reformatBlocks(self, from_, charsRemoved, charsAdded):
    '''Main line: Reformat the lines in the indicated range.'''
    self.rehighlightPending = False
    block = self.d.findBlock(from_)
    if not self.is_valid(block):
        return
    # Set the ivars for reformat_blocks_helper.
    adjust = 1 if charsRemoved > 0 else 0
    lastBlock = self.d.findBlock(from_ + charsAdded + adjust)
    if self.is_valid(lastBlock):
        self.r_end = lastBlock.position() + lastBlock.length()
    else:
        # NOTE(review): blockCount is a block (line) count, not a character
        # position like lastBlock.position() above -- confirm intent.
        self.r_end = self.d.blockCount()
    self.r_block = block
    self.r_p = self.c.p.copy()
    self.r_force = False
    # Delegate the colorizing to shared helper.
    self.reformat_blocks_helper()
#@+node:ekr.20140906143232.18697: *4* OLD class PythonLexer
# Stuck: regardless of class: there seems to be no way to force a recolor.
if Qsci:

    class PythonLexer(Qsci.QsciLexerCustom):
        '''A subclass of the Python lexer that colorizes section references.'''

        def __init__(self, parent=None):
            '''Ctor for PythonLexer class.'''
            Qsci.QsciLexerCustom.__init__(self, parent)
                # Init the base class.
            self.lexer = None # Set in styleText.
            self.parent = parent
            self.tag = '(PythonLexer)'

        def setStringsOverNewlineAllowed(self, aBool):
            # Required by the QsciLexerCustom interface; intentionally a no-op.
            pass

        def description(self, style):
            '''Return a description for the given style number.'''
            return self.tag

        def setStyling(self, length, style):
            # Trace-only stub: real styling is never applied.
            g.trace(self.tag, length, style)

        def styleText(self, start, end):
            '''Style the text from start to end.'''
            g.trace(self.tag, start, end)
            self.lexer = Qsci.QsciLexerPython(parent=self.parent)
            self.lexer.setStringsOverNewlineAllowed(True)
            # self.lexer.styleText(start,end)

        def configure_lexer(self):
            '''Configure the QScintilla lexer.'''
            lexer = self
            # To do: use c.config setting.
            # pylint: disable=no-member
            font = QtGui.QFont("DejaVu Sans Mono", 14)
            lexer.setFont(font)
#@-all
#@@nosearch
#@-leo
