[Mercurial Tests] Added hg distribution for Windows and Unix-like systems.
authorKirill Likhodedov <kirill.likhodedov@jetbrains.com>
Mon, 7 Jun 2010 13:29:42 +0000 (17:29 +0400)
committerKirill Likhodedov <kirill.likhodedov@jetbrains.com>
Mon, 7 Jun 2010 13:29:42 +0000 (17:29 +0400)
143 files changed:
plugins/hg4idea/testData/bin/hg [new file with mode: 0755]
plugins/hg4idea/testData/bin/hg.exe [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/acl.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/bookmarks.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/bugzilla.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/children.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/churn.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/color.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/bzr.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/common.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/convcmd.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/cvs.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/cvsps.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/darcs.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/filemap.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/git.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/gnuarch.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/hg.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/monotone.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/p4.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/subversion.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/convert/transport.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/extdiff.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/fetch.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/gpg.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/graphlog.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/hgcia.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/hgk.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/highlight/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/highlight/highlight.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/client.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/common.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/linux/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/linux/_inotify.c [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/linux/watcher.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/linuxserver.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/inotify/server.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/interhg.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/keyword.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/mq.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/notify.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/pager.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/parentrevspec.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/patchbomb.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/progress.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/purge.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/rebase.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/record.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/relink.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/schemes.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/share.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/transplant.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/win32mbcs.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/win32text.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/zeroconf/Zeroconf.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/hgext/zeroconf/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/library.zip [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/__version__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/ancestor.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/archival.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/base85.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/bdiff.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/bundlerepo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/byterange.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/changegroup.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/changelog.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/cmdutil.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/commands.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/config.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/context.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/copies.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/demandimport.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/diffhelpers.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/dirstate.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/dispatch.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/encoding.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/error.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/extensions.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/fancyopts.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/filelog.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/filemerge.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/graphmod.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hbisect.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/help.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hg.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/__init__.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/common.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/hgweb_mod.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/hgwebdir_mod.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/protocol.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/request.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/server.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/webcommands.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/webutil.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hgweb/wsgicgi.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/hook.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/httprepo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/i18n.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/ignore.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/keepalive.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/localrepo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/lock.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/lsprof.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/lsprofcalltree.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/mail.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/manifest.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/match.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/mdiff.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/merge.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/minirst.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/mpatch.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/node.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/osutil.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/parsers.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/patch.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/posix.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/repair.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/repo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/revlog.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/simplemerge.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/sshrepo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/sshserver.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/statichttprepo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/store.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/streamclone.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/strutil.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/subrepo.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/tags.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/templatefilters.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/templatekw.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/templater.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/transaction.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/ui.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/url.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/util.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/verify.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/win32.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/mercurial/windows.py [new file with mode: 0644]
plugins/hg4idea/testData/bin/python26.dll [new file with mode: 0644]

diff --git a/plugins/hg4idea/testData/bin/hg b/plugins/hg4idea/testData/bin/hg
new file mode 100755 (executable)
index 0000000..cbaef48
--- /dev/null
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+#
+# mercurial - scalable distributed SCM
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# enable importing on demand to reduce startup time
+try:
+    from mercurial import demandimport; demandimport.enable()
+except ImportError:
+    import sys
+    sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
+                     ' '.join(sys.path))
+    sys.stderr.write("(check your install and PYTHONPATH)\n")
+    sys.exit(-1)
+
+import sys
+import mercurial.util
+import mercurial.dispatch
+
+for fp in (sys.stdin, sys.stdout, sys.stderr):
+    mercurial.util.set_binary(fp)
+
+mercurial.dispatch.run()
diff --git a/plugins/hg4idea/testData/bin/hg.exe b/plugins/hg4idea/testData/bin/hg.exe
new file mode 100644 (file)
index 0000000..3d05f9b
Binary files /dev/null and b/plugins/hg4idea/testData/bin/hg.exe differ
diff --git a/plugins/hg4idea/testData/bin/hgext/__init__.py b/plugins/hg4idea/testData/bin/hgext/__init__.py
new file mode 100644 (file)
index 0000000..fdffa2a
--- /dev/null
@@ -0,0 +1 @@
+# placeholder
diff --git a/plugins/hg4idea/testData/bin/hgext/acl.py b/plugins/hg4idea/testData/bin/hgext/acl.py
new file mode 100644 (file)
index 0000000..d8a9fa3
--- /dev/null
@@ -0,0 +1,106 @@
+# acl.py - changeset access control for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''hooks for controlling repository access
+
+This hook makes it possible to allow or deny write access to portions
+of a repository when receiving incoming changesets.
+
+The authorization is matched based on the local user name on the
+system where the hook runs, and not the committer of the original
+changeset (since the latter is merely informative).
+
+The acl hook is best used along with a restricted shell like hgsh,
+preventing authenticating users from doing anything other than
+pushing or pulling. The hook is not safe to use if users have
+interactive shell access, as they can then disable the hook.
+Nor is it safe if remote users share an account, because then there
+is no way to distinguish them.
+
+To use this hook, configure the acl extension in your hgrc like this::
+
+  [extensions]
+  acl =
+
+  [hooks]
+  pretxnchangegroup.acl = python:hgext.acl.hook
+
+  [acl]
+  # Check whether the source of incoming changes is in this list
+  # ("serve" == ssh or http, "push", "pull", "bundle")
+  sources = serve
+
+The allow and deny sections take a subtree pattern as key (with a glob
+syntax by default), and a comma separated list of users as the
+corresponding value. The deny list is checked before the allow list
+is. ::
+
+  [acl.allow]
+  # If acl.allow is not present, all users are allowed by default.
+  # An empty acl.allow section means no users allowed.
+  docs/** = doc_writer
+  .hgtags = release_engineer
+
+  [acl.deny]
+  # If acl.deny is not present, no users are refused by default.
+  # An empty acl.deny section means all users allowed.
+  glob pattern = user4, user5
+   ** = user6
+'''
+
+from mercurial.i18n import _
+from mercurial import util, match
+import getpass, urllib
+
+def buildmatch(ui, repo, user, key):
+    '''return tuple of (match function, list enabled).'''
+    if not ui.has_section(key):
+        ui.debug('acl: %s not enabled\n' % key)
+        return None
+
+    pats = [pat for pat, users in ui.configitems(key)
+            if users == '*' or user in users.replace(',', ' ').split()]
+    ui.debug('acl: %s enabled, %d entries for user %s\n' %
+             (key, len(pats), user))
+    if pats:
+        return match.match(repo.root, '', pats)
+    return match.exact(repo.root, '', [])
+
+
+def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
+    if hooktype != 'pretxnchangegroup':
+        raise util.Abort(_('config error - hook type "%s" cannot stop '
+                           'incoming changesets') % hooktype)
+    if source not in ui.config('acl', 'sources', 'serve').split():
+        ui.debug('acl: changes have source "%s" - skipping\n' % source)
+        return
+
+    user = None
+    if source == 'serve' and 'url' in kwargs:
+        url = kwargs['url'].split(':')
+        if url[0] == 'remote' and url[1].startswith('http'):
+            user = urllib.unquote(url[3])
+
+    if user is None:
+        user = getpass.getuser()
+
+    cfg = ui.config('acl', 'config')
+    if cfg:
+        ui.readconfig(cfg, sections = ['acl.allow', 'acl.deny'])
+    allow = buildmatch(ui, repo, user, 'acl.allow')
+    deny = buildmatch(ui, repo, user, 'acl.deny')
+
+    for rev in xrange(repo[node], len(repo)):
+        ctx = repo[rev]
+        for f in ctx.files():
+            if deny and deny(f):
+                ui.debug('acl: user %s denied on %s\n' % (user, f))
+                raise util.Abort(_('acl: access denied for changeset %s') % ctx)
+            if allow and not allow(f):
+                ui.debug('acl: user %s not allowed on %s\n' % (user, f))
+                raise util.Abort(_('acl: access denied for changeset %s') % ctx)
+        ui.debug('acl: allowing changeset %s\n' % ctx)
diff --git a/plugins/hg4idea/testData/bin/hgext/bookmarks.py b/plugins/hg4idea/testData/bin/hgext/bookmarks.py
new file mode 100644 (file)
index 0000000..9f6c0c3
--- /dev/null
@@ -0,0 +1,334 @@
+# Mercurial extension to provide the 'hg bookmark' command
+#
+# Copyright 2008 David Soria Parra <dsp@php.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''track a line of development with movable markers
+
+Bookmarks are local movable markers to changesets. Every bookmark
+points to a changeset identified by its hash. If you commit a
+changeset that is based on a changeset that has a bookmark on it, the
+bookmark shifts to the new changeset.
+
+It is possible to use bookmark names in every revision lookup (e.g. hg
+merge, hg update).
+
+By default, when several bookmarks point to the same changeset, they
+will all move forward together. It is possible to obtain a more
+git-like experience by adding the following configuration option to
+your .hgrc::
+
+  [bookmarks]
+  track.current = True
+
+This will cause Mercurial to track the bookmark that you are currently
+using, and only update it. This is similar to git's approach to
+branching.
+'''
+
+from mercurial.i18n import _
+from mercurial.node import nullid, nullrev, hex, short
+from mercurial import util, commands, repair, extensions
+import os
+
+def write(repo):
+    '''Write bookmarks
+
+    Write the given bookmark => hash dictionary to the .hg/bookmarks file
+    in a format equal to those of localtags.
+
+    We also store a backup of the previous state in undo.bookmarks that
+    can be copied back on rollback.
+    '''
+    refs = repo._bookmarks
+    if os.path.exists(repo.join('bookmarks')):
+        util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
+    if repo._bookmarkcurrent not in refs:
+        setcurrent(repo, None)
+    wlock = repo.wlock()
+    try:
+        file = repo.opener('bookmarks', 'w', atomictemp=True)
+        for refspec, node in refs.iteritems():
+            file.write("%s %s\n" % (hex(node), refspec))
+        file.rename()
+    finally:
+        wlock.release()
+
+def setcurrent(repo, mark):
+    '''Set the name of the bookmark that we are currently on
+
+    Set the name of the bookmark that we are on (hg update <bookmark>).
+    The name is recorded in .hg/bookmarks.current
+    '''
+    current = repo._bookmarkcurrent
+    if current == mark:
+        return
+
+    refs = repo._bookmarks
+
+    # do not update if we do update to a rev equal to the current bookmark
+    if (mark and mark not in refs and
+        current and refs[current] == repo.changectx('.').node()):
+        return
+    if mark not in refs:
+        mark = ''
+    wlock = repo.wlock()
+    try:
+        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
+        file.write(mark)
+        file.rename()
+    finally:
+        wlock.release()
+    repo._bookmarkcurrent = mark
+
+def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
+    '''track a line of development with movable markers
+
+    Bookmarks are pointers to certain commits that move when
+    committing. Bookmarks are local. They can be renamed, copied and
+    deleted. It is possible to use bookmark names in 'hg merge' and
+    'hg update' to merge and update respectively to a given bookmark.
+
+    You can use 'hg bookmark NAME' to set a bookmark on the working
+    directory's parent revision with the given name. If you specify
+    a revision using -r REV (where REV may be an existing bookmark),
+    the bookmark is assigned to that revision.
+    '''
+    hexfn = ui.debugflag and hex or short
+    marks = repo._bookmarks
+    cur   = repo.changectx('.').node()
+
+    if rename:
+        if rename not in marks:
+            raise util.Abort(_("a bookmark of this name does not exist"))
+        if mark in marks and not force:
+            raise util.Abort(_("a bookmark of the same name already exists"))
+        if mark is None:
+            raise util.Abort(_("new bookmark name required"))
+        marks[mark] = marks[rename]
+        del marks[rename]
+        if repo._bookmarkcurrent == rename:
+            setcurrent(repo, mark)
+        write(repo)
+        return
+
+    if delete:
+        if mark is None:
+            raise util.Abort(_("bookmark name required"))
+        if mark not in marks:
+            raise util.Abort(_("a bookmark of this name does not exist"))
+        if mark == repo._bookmarkcurrent:
+            setcurrent(repo, None)
+        del marks[mark]
+        write(repo)
+        return
+
+    if mark != None:
+        if "\n" in mark:
+            raise util.Abort(_("bookmark name cannot contain newlines"))
+        mark = mark.strip()
+        if mark in marks and not force:
+            raise util.Abort(_("a bookmark of the same name already exists"))
+        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
+            and not force):
+            raise util.Abort(
+                _("a bookmark cannot have the name of an existing branch"))
+        if rev:
+            marks[mark] = repo.lookup(rev)
+        else:
+            marks[mark] = repo.changectx('.').node()
+            setcurrent(repo, mark)
+        write(repo)
+        return
+
+    if mark is None:
+        if rev:
+            raise util.Abort(_("bookmark name required"))
+        if len(marks) == 0:
+            ui.status(_("no bookmarks set\n"))
+        else:
+            for bmark, n in marks.iteritems():
+                if ui.configbool('bookmarks', 'track.current'):
+                    current = repo._bookmarkcurrent
+                    prefix = (bmark == current and n == cur) and '*' or ' '
+                else:
+                    prefix = (n == cur) and '*' or ' '
+
+                if ui.quiet:
+                    ui.write("%s\n" % bmark)
+                else:
+                    ui.write(" %s %-25s %d:%s\n" % (
+                        prefix, bmark, repo.changelog.rev(n), hexfn(n)))
+        return
+
+def _revstostrip(changelog, node):
+    srev = changelog.rev(node)
+    tostrip = [srev]
+    saveheads = []
+    for r in xrange(srev, len(changelog)):
+        parents = changelog.parentrevs(r)
+        if parents[0] in tostrip or parents[1] in tostrip:
+            tostrip.append(r)
+            if parents[1] != nullrev:
+                for p in parents:
+                    if p not in tostrip and p > srev:
+                        saveheads.append(p)
+    return [r for r in tostrip if r not in saveheads]
+
+def strip(oldstrip, ui, repo, node, backup="all"):
+    """Strip bookmarks if revisions are stripped using
+    the mercurial.strip method. This usually happens during
+    qpush and qpop"""
+    revisions = _revstostrip(repo.changelog, node)
+    marks = repo._bookmarks
+    update = []
+    for mark, n in marks.iteritems():
+        if repo.changelog.rev(n) in revisions:
+            update.append(mark)
+    oldstrip(ui, repo, node, backup)
+    if len(update) > 0:
+        for m in update:
+            marks[m] = repo.changectx('.').node()
+        write(repo)
+
+def reposetup(ui, repo):
+    if not repo.local():
+        return
+
+    class bookmark_repo(repo.__class__):
+
+        @util.propertycache
+        def _bookmarks(self):
+            '''Parse .hg/bookmarks file and return a dictionary
+
+            Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
+            in the .hg/bookmarks file. They are read returned as a dictionary
+            with name => hash values.
+            '''
+            try:
+                bookmarks = {}
+                for line in self.opener('bookmarks'):
+                    sha, refspec = line.strip().split(' ', 1)
+                    bookmarks[refspec] = super(bookmark_repo, self).lookup(sha)
+            except:
+                pass
+            return bookmarks
+
+        @util.propertycache
+        def _bookmarkcurrent(self):
+            '''Get the current bookmark
+
+            If we use gittishsh branches we have a current bookmark that
+            we are on. This function returns the name of the bookmark. It
+            is stored in .hg/bookmarks.current
+            '''
+            mark = None
+            if os.path.exists(self.join('bookmarks.current')):
+                file = self.opener('bookmarks.current')
+                # No readline() in posixfile_nt, reading everything is cheap
+                mark = (file.readlines() or [''])[0]
+                if mark == '':
+                    mark = None
+                file.close()
+            return mark
+
+        def rollback(self):
+            if os.path.exists(self.join('undo.bookmarks')):
+                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
+            return super(bookmark_repo, self).rollback()
+
+        def lookup(self, key):
+            if key in self._bookmarks:
+                key = self._bookmarks[key]
+            return super(bookmark_repo, self).lookup(key)
+
+        def _bookmarksupdate(self, parents, node):
+            marks = self._bookmarks
+            update = False
+            if ui.configbool('bookmarks', 'track.current'):
+                mark = self._bookmarkcurrent
+                if mark and marks[mark] in parents:
+                    marks[mark] = node
+                    update = True
+            else:
+                for mark, n in marks.items():
+                    if n in parents:
+                        marks[mark] = node
+                        update = True
+            if update:
+                write(self)
+
+        def commitctx(self, ctx, error=False):
+            """Add a revision to the repository and
+            move the bookmark"""
+            wlock = self.wlock() # do both commit and bookmark with lock held
+            try:
+                node  = super(bookmark_repo, self).commitctx(ctx, error)
+                if node is None:
+                    return None
+                parents = self.changelog.parents(node)
+                if parents[1] == nullid:
+                    parents = (parents[0],)
+
+                self._bookmarksupdate(parents, node)
+                return node
+            finally:
+                wlock.release()
+
+        def addchangegroup(self, source, srctype, url, emptyok=False):
+            parents = self.dirstate.parents()
+
+            result = super(bookmark_repo, self).addchangegroup(
+                source, srctype, url, emptyok)
+            if result > 1:
+                # We have more heads than before
+                return result
+            node = self.changelog.tip()
+
+            self._bookmarksupdate(parents, node)
+            return result
+
+        def _findtags(self):
+            """Merge bookmarks with normal tags"""
+            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
+            tags.update(self._bookmarks)
+            return (tags, tagtypes)
+
+        if hasattr(repo, 'invalidate'):
+            def invalidate(self):
+                super(bookmark_repo, self).invalidate()
+                for attr in ('_bookmarks', '_bookmarkcurrent'):
+                    if attr in self.__dict__:
+                        delattr(repo, attr)
+
+    repo.__class__ = bookmark_repo
+
+def uisetup(ui):
+    extensions.wrapfunction(repair, "strip", strip)
+    if ui.configbool('bookmarks', 'track.current'):
+        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
+
+def updatecurbookmark(orig, ui, repo, *args, **opts):
+    '''Set the current bookmark
+
+    If the user updates to a bookmark we update the .hg/bookmarks.current
+    file.
+    '''
+    res = orig(ui, repo, *args, **opts)
+    rev = opts['rev']
+    if not rev and len(args) > 0:
+        rev = args[0]
+    setcurrent(repo, rev)
+    return res
+
+cmdtable = {
+    "bookmarks":
+        (bookmark,
+         [('f', 'force', False, _('force')),
+          ('r', 'rev', '', _('revision')),
+          ('d', 'delete', False, _('delete a given bookmark')),
+          ('m', 'rename', '', _('rename a given bookmark'))],
+         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
+}
diff --git a/plugins/hg4idea/testData/bin/hgext/bugzilla.py b/plugins/hg4idea/testData/bin/hgext/bugzilla.py
new file mode 100644 (file)
index 0000000..e94b03f
--- /dev/null
@@ -0,0 +1,441 @@
+# bugzilla.py - bugzilla integration for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''hooks for integrating with the Bugzilla bug tracker
+
+This hook extension adds comments on bugs in Bugzilla when changesets
+that refer to bugs by Bugzilla ID are seen. The hook does not change
+bug status.
+
+The hook updates the Bugzilla database directly. Only Bugzilla
+installations using MySQL are supported.
+
+The hook relies on a Bugzilla script to send bug change notification
+emails. That script changes between Bugzilla versions; the
+'processmail' script used prior to 2.18 is replaced in 2.18 and
+subsequent versions by 'config/sendbugmail.pl'. Note that these will
+be run by Mercurial as the user pushing the change; you will need to
+ensure the Bugzilla install file permissions are set appropriately.
+
+The extension is configured through three different configuration
+sections. These keys are recognized in the [bugzilla] section:
+
+host
+  Hostname of the MySQL server holding the Bugzilla database.
+
+db
+  Name of the Bugzilla database in MySQL. Default 'bugs'.
+
+user
+  Username to use to access MySQL server. Default 'bugs'.
+
+password
+  Password to use to access MySQL server.
+
+timeout
+  Database connection timeout (seconds). Default 5.
+
+version
+  Bugzilla version. Specify '3.0' for Bugzilla versions 3.0 and later,
+  '2.18' for Bugzilla versions from 2.18 and '2.16' for versions prior
+  to 2.18.
+
+bzuser
+  Fallback Bugzilla user name to record comments with, if changeset
+  committer cannot be found as a Bugzilla user.
+
+bzdir
+   Bugzilla install directory. Used by default notify. Default
+   '/var/www/html/bugzilla'.
+
+notify
+  The command to run to get Bugzilla to send bug change notification
+  emails. Substitutes from a map with 3 keys, 'bzdir', 'id' (bug id)
+  and 'user' (committer bugzilla email). Default depends on version;
+  from 2.18 it is "cd %(bzdir)s && perl -T contrib/sendbugmail.pl
+  %(id)s %(user)s".
+
+regexp
+  Regular expression to match bug IDs in changeset commit message.
+  Must contain one "()" group. The default expression matches 'Bug
+  1234', 'Bug no. 1234', 'Bug number 1234', 'Bugs 1234,5678', 'Bug
+  1234 and 5678' and variations thereof. Matching is case insensitive.
+
+style
+  The style file to use when formatting comments.
+
+template
+  Template to use when formatting comments. Overrides style if
+  specified. In addition to the usual Mercurial keywords, the
+  extension specifies::
+
+    {bug}       The Bugzilla bug ID.
+    {root}      The full pathname of the Mercurial repository.
+    {webroot}   Stripped pathname of the Mercurial repository.
+    {hgweb}     Base URL for browsing Mercurial repositories.
+
+  Default 'changeset {node|short} in repo {root} refers '
+          'to bug {bug}.\\ndetails:\\n\\t{desc|tabindent}'
+
+strip
+  The number of slashes to strip from the front of {root} to produce
+  {webroot}. Default 0.
+
+usermap
+  Path of file containing Mercurial committer ID to Bugzilla user ID
+  mappings. If specified, the file should contain one mapping per
+  line, "committer"="Bugzilla user". See also the [usermap] section.
+
+The [usermap] section is used to specify mappings of Mercurial
+committer ID to Bugzilla user ID. See also [bugzilla].usermap.
+"committer"="Bugzilla user"
+
+Finally, the [web] section supports one entry:
+
+baseurl
+  Base URL for browsing Mercurial repositories. Reference from
+  templates as {hgweb}.
+
+Activating the extension::
+
+    [extensions]
+    bugzilla =
+
+    [hooks]
+    # run bugzilla hook on every change pulled or pushed in here
+    incoming.bugzilla = python:hgext.bugzilla.hook
+
+Example configuration:
+
+This example configuration is for a collection of Mercurial
+repositories in /var/local/hg/repos/ used with a local Bugzilla 3.2
+installation in /opt/bugzilla-3.2. ::
+
+    [bugzilla]
+    host=localhost
+    password=XYZZY
+    version=3.0
+    bzuser=unknown@domain.com
+    bzdir=/opt/bugzilla-3.2
+    template=Changeset {node|short} in {root|basename}.
+             {hgweb}/{webroot}/rev/{node|short}\\n
+             {desc}\\n
+    strip=5
+
+    [web]
+    baseurl=http://dev.domain.com/hg
+
+    [usermap]
+    user@emaildomain.com=user.name@bugzilladomain.com
+
+Commits add a comment to the Bugzilla bug record of the form::
+
+    Changeset 3b16791d6642 in repository-name.
+    http://dev.domain.com/hg/repository-name/rev/3b16791d6642
+
+    Changeset commit comment. Bug 1234.
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short
+from mercurial import cmdutil, templater, util
+import re, time
+
+MySQLdb = None
+
+# Format an iterable of bug ids as a SQL tuple literal, e.g. "(1,2,3)",
+# for interpolation into an "in" clause.
+def buglist(ids):
+    return '(' + ','.join(map(str, ids)) + ')'
+
+class bugzilla_2_16(object):
+    '''support for bugzilla version 2.16.
+
+    Talks directly to the Bugzilla MySQL database (via MySQLdb) and
+    shells out to an external command to send change notification mail.
+    '''
+
+    def __init__(self, ui):
+        self.ui = ui
+        host = self.ui.config('bugzilla', 'host', 'localhost')
+        user = self.ui.config('bugzilla', 'user', 'bugs')
+        passwd = self.ui.config('bugzilla', 'password')
+        db = self.ui.config('bugzilla', 'db', 'bugs')
+        timeout = int(self.ui.config('bugzilla', 'timeout', 5))
+        usermap = self.ui.config('bugzilla', 'usermap')
+        if usermap:
+            # Merge committer -> bugzilla-user mappings from the usermap file
+            # into the [usermap] config section.
+            self.ui.readconfig(usermap, sections=['usermap'])
+        # NOTE(review): passwd is None when 'bugzilla.password' is unset,
+        # which would make len(passwd) raise here — presumably the hook is
+        # only deployed with a password configured; confirm.
+        self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
+                     (host, db, user, '*' * len(passwd)))
+        self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd,
+                                    db=db, connect_timeout=timeout)
+        self.cursor = self.conn.cursor()
+        self.longdesc_id = self.get_longdesc_id()
+        # Cache of login-name (or numeric string) -> numeric bugzilla userid.
+        self.user_ids = {}
+        self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
+
+    def run(self, *args, **kwargs):
+        '''run a query.'''
+        self.ui.note(_('query: %s %s\n') % (args, kwargs))
+        try:
+            self.cursor.execute(*args, **kwargs)
+        except MySQLdb.MySQLError:
+            self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
+            raise
+
+    def get_longdesc_id(self):
+        '''get identity of longdesc field'''
+        self.run('select fieldid from fielddefs where name = "longdesc"')
+        ids = self.cursor.fetchall()
+        if len(ids) != 1:
+            raise util.Abort(_('unknown database schema'))
+        return ids[0][0]
+
+    def filter_real_bug_ids(self, ids):
+        '''filter not-existing bug ids from list.'''
+        self.run('select bug_id from bugs where bug_id in %s' % buglist(ids))
+        return sorted([c[0] for c in self.cursor.fetchall()])
+
+    def filter_unknown_bug_ids(self, node, ids):
+        '''filter bug ids from list that already refer to this changeset.'''
+
+        # A bug "knows" the changeset when any of its comments already
+        # contains the short node hash.
+        self.run('''select bug_id from longdescs where
+                    bug_id in %s and thetext like "%%%s%%"''' %
+                 (buglist(ids), short(node)))
+        unknown = set(ids)
+        for (id,) in self.cursor.fetchall():
+            self.ui.status(_('bug %d already knows about changeset %s\n') %
+                           (id, short(node)))
+            unknown.discard(id)
+        return sorted(unknown)
+
+    def notify(self, ids, committer):
+        '''tell bugzilla to send mail.'''
+
+        self.ui.status(_('telling bugzilla to send mail:\n'))
+        (user, userid) = self.get_bugzilla_user(committer)
+        for id in ids:
+            self.ui.status(_('  bug %s\n') % id)
+            cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
+            bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
+            try:
+                # Backwards-compatible with old notify string, which
+                # took one string. This will throw with a new format
+                # string.
+                cmd = cmdfmt % id
+            except TypeError:
+                cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
+            self.ui.note(_('running notify command %s\n') % cmd)
+            # Redirect stderr into the pipe so failures can be reported.
+            fp = util.popen('(%s) 2>&1' % cmd)
+            out = fp.read()
+            ret = fp.close()
+            if ret:
+                self.ui.warn(out)
+                raise util.Abort(_('bugzilla notify command %s') %
+                                 util.explain_exit(ret)[0])
+        self.ui.status(_('done\n'))
+
+    def get_user_id(self, user):
+        '''look up numeric bugzilla user id.'''
+        try:
+            return self.user_ids[user]
+        except KeyError:
+            try:
+                # Accept an already-numeric id without a database round trip.
+                userid = int(user)
+            except ValueError:
+                self.ui.note(_('looking up user %s\n') % user)
+                self.run('''select userid from profiles
+                            where login_name like %s''', user)
+                all = self.cursor.fetchall()
+                if len(all) != 1:
+                    raise KeyError(user)
+                userid = int(all[0][0])
+            self.user_ids[user] = userid
+            return userid
+
+    def map_committer(self, user):
+        '''map name of committer to bugzilla user name.'''
+        for committer, bzuser in self.ui.configitems('usermap'):
+            if committer.lower() == user.lower():
+                return bzuser
+        return user
+
+    def get_bugzilla_user(self, committer):
+        '''see if committer is a registered bugzilla user. Return
+        bugzilla username and userid if so. If not, return default
+        bugzilla username and userid.'''
+        user = self.map_committer(committer)
+        try:
+            userid = self.get_user_id(user)
+        except KeyError:
+            try:
+                # Fall back to the configured 'bugzilla.bzuser' account.
+                defaultuser = self.ui.config('bugzilla', 'bzuser')
+                if not defaultuser:
+                    raise util.Abort(_('cannot find bugzilla user id for %s') %
+                                     user)
+                userid = self.get_user_id(defaultuser)
+                user = defaultuser
+            except KeyError:
+                raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
+                                 (user, defaultuser))
+        return (user, userid)
+
+    def add_comment(self, bugid, text, committer):
+        '''add comment to bug. try adding comment as committer of
+        changeset, otherwise as default bugzilla user.'''
+        (user, userid) = self.get_bugzilla_user(committer)
+        now = time.strftime('%Y-%m-%d %H:%M:%S')
+        self.run('''insert into longdescs
+                    (bug_id, who, bug_when, thetext)
+                    values (%s, %s, %s, %s)''',
+                 (bugid, userid, now, text))
+        self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
+                    values (%s, %s, %s, %s)''',
+                 (bugid, userid, now, self.longdesc_id))
+        self.conn.commit()
+
+class bugzilla_2_18(bugzilla_2_16):
+    '''support for bugzilla 2.18 series.
+
+    Same database schema as 2.16; only the default notify command
+    changes ('processmail' was replaced by contrib/sendbugmail.pl).
+    '''
+
+    def __init__(self, ui):
+        bugzilla_2_16.__init__(self, ui)
+        self.default_notify = \
+            "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
+
+class bugzilla_3_0(bugzilla_2_18):
+    '''support for bugzilla 3.0 series.
+
+    Differs from 2.18 only in the fielddefs schema: the id column is
+    named 'id' instead of 'fieldid'.
+    '''
+
+    def __init__(self, ui):
+        bugzilla_2_18.__init__(self, ui)
+
+    def get_longdesc_id(self):
+        '''get identity of longdesc field'''
+        self.run('select id from fielddefs where name = "longdesc"')
+        ids = self.cursor.fetchall()
+        if len(ids) != 1:
+            raise util.Abort(_('unknown database schema'))
+        return ids[0][0]
+
+class bugzilla(object):
+    '''Version-dispatching facade over the bugzilla_* access classes.
+
+    Unknown attribute access is delegated (via __getattr__) to a lazily
+    created, class-level singleton instance of the version-specific
+    class selected by the 'bugzilla.version' config value.
+    '''
+    # supported versions of bugzilla. different versions have
+    # different schemas.
+    _versions = {
+        '2.16': bugzilla_2_16,
+        '2.18': bugzilla_2_18,
+        '3.0':  bugzilla_3_0
+        }
+
+    # Default regexp for bug references in commit messages; must keep
+    # exactly one capture group (the id list).
+    _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
+                       r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
+
+    # Class-level singleton: one database connection shared by all
+    # instances for the life of the process.
+    _bz = None
+
+    def __init__(self, ui, repo):
+        self.ui = ui
+        self.repo = repo
+
+    def bz(self):
+        '''return object that knows how to talk to bugzilla version in
+        use.'''
+
+        if bugzilla._bz is None:
+            bzversion = self.ui.config('bugzilla', 'version')
+            try:
+                bzclass = bugzilla._versions[bzversion]
+            except KeyError:
+                raise util.Abort(_('bugzilla version %s not supported') %
+                                 bzversion)
+            bugzilla._bz = bzclass(self.ui)
+        return bugzilla._bz
+
+    def __getattr__(self, key):
+        # Delegate anything not defined here to the version-specific object.
+        return getattr(self.bz(), key)
+
+    # Compiled lazily in find_bug_ids from the 'bugzilla.regexp' config.
+    _bug_re = None
+    _split_re = None
+
+    def find_bug_ids(self, ctx):
+        '''find valid bug ids that are referred to in changeset
+        comments and that do not already have references to this
+        changeset.'''
+
+        if bugzilla._bug_re is None:
+            bugzilla._bug_re = re.compile(
+                self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
+                re.IGNORECASE)
+            bugzilla._split_re = re.compile(r'\D+')
+        start = 0
+        ids = set()
+        while True:
+            m = bugzilla._bug_re.search(ctx.description(), start)
+            if not m:
+                break
+            start = m.end()
+            # The captured group may list several ids ("1234 and 5678");
+            # split on runs of non-digits.
+            for id in bugzilla._split_re.split(m.group(1)):
+                if not id:
+                    continue
+                ids.add(int(id))
+        if ids:
+            ids = self.filter_real_bug_ids(ids)
+        if ids:
+            ids = self.filter_unknown_bug_ids(ctx.node(), ids)
+        return ids
+
+    def update(self, bugid, ctx):
+        '''update bugzilla bug with reference to changeset.'''
+
+        def webroot(root):
+            '''strip leading prefix of repo root and turn into
+            url-safe path.'''
+            count = int(self.ui.config('bugzilla', 'strip', 0))
+            root = util.pconvert(root)
+            while count > 0:
+                c = root.find('/')
+                if c == -1:
+                    break
+                root = root[c + 1:]
+                count -= 1
+            return root
+
+        mapfile = self.ui.config('bugzilla', 'style')
+        tmpl = self.ui.config('bugzilla', 'template')
+        t = cmdutil.changeset_templater(self.ui, self.repo,
+                                        False, None, mapfile, False)
+        if not mapfile and not tmpl:
+            tmpl = _('changeset {node|short} in repo {root} refers '
+                     'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
+        if tmpl:
+            tmpl = templater.parsestring(tmpl, quoted=False)
+            t.use_template(tmpl)
+        # Render the comment into a buffer, then post it to the bug.
+        self.ui.pushbuffer()
+        t.show(ctx, changes=ctx.changeset(),
+               bug=str(bugid),
+               hgweb=self.ui.config('web', 'baseurl'),
+               root=self.repo.root,
+               webroot=webroot(self.repo.root))
+        data = self.ui.popbuffer()
+        self.add_comment(bugid, data, util.email(ctx.user()))
+
+def hook(ui, repo, hooktype, node=None, **kwargs):
+    '''add comment to bugzilla for each changeset that refers to a
+    bugzilla bug id. only add a comment once per bug, so same change
+    seen multiple times does not fill bug with duplicate data.'''
+    # Import MySQLdb lazily so merely loading the extension does not
+    # require the module; bind it to the module-level name used by the
+    # bugzilla_* classes.
+    try:
+        import MySQLdb as mysql
+        global MySQLdb
+        MySQLdb = mysql
+    except ImportError, err:
+        raise util.Abort(_('python mysql support not available: %s') % err)
+
+    if node is None:
+        raise util.Abort(_('hook type %s does not pass a changeset id') %
+                         hooktype)
+    try:
+        bz = bugzilla(ui, repo)
+        ctx = repo[node]
+        ids = bz.find_bug_ids(ctx)
+        if ids:
+            for id in ids:
+                bz.update(id, ctx)
+            bz.notify(ids, util.email(ctx.user()))
+    except MySQLdb.MySQLError, err:
+        raise util.Abort(_('database error: %s') % err[1])
+
+
diff --git a/plugins/hg4idea/testData/bin/hgext/children.py b/plugins/hg4idea/testData/bin/hgext/children.py
new file mode 100644 (file)
index 0000000..f0df3fd
--- /dev/null
@@ -0,0 +1,44 @@
+# Mercurial extension to provide the 'hg children' command
+#
+# Copyright 2007 by Intevation GmbH <intevation@intevation.de>
+#
+# Author(s):
+# Thomas Arendsen Hein <thomas@intevation.de>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to display child changesets'''
+
+from mercurial import cmdutil
+from mercurial.commands import templateopts
+from mercurial.i18n import _
+
+
+def children(ui, repo, file_=None, **opts):
+    """show the children of the given or working directory revision
+
+    Print the children of the working directory's revisions. If a
+    revision is given via -r/--rev, the children of that revision will
+    be printed. If a file argument is given, revision in which the
+    file was last changed (after the working directory revision or the
+    argument to --rev if given) is printed.
+    """
+    # rev is None when -r/--rev was not given; repo[None] is the
+    # working directory context.
+    rev = opts.get('rev')
+    if file_:
+        ctx = repo.filectx(file_, changeid=rev)
+    else:
+        ctx = repo[rev]
+
+    displayer = cmdutil.show_changeset(ui, repo, opts)
+    for cctx in ctx.children():
+        displayer.show(cctx)
+    displayer.close()
+
+# Mercurial command table: command name -> (callback, option list, synopsis).
+cmdtable = {
+    "children":
+        (children,
+         [('r', 'rev', '', _('show children of the specified revision')),
+         ] + templateopts,
+         _('hg children [-r REV] [FILE]')),
+}
diff --git a/plugins/hg4idea/testData/bin/hgext/churn.py b/plugins/hg4idea/testData/bin/hgext/churn.py
new file mode 100644 (file)
index 0000000..848276c
--- /dev/null
@@ -0,0 +1,193 @@
+# churn.py - create a graph of revisions count grouped by template
+#
+# Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
+# Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to display statistics about repository history'''
+
+from mercurial.i18n import _
+from mercurial import patch, cmdutil, util, templater
+import sys, os
+import time, datetime
+
+def maketemplater(ui, repo, tmpl):
+    """Build a changeset templater for *tmpl*, aborting on a template
+    syntax error."""
+    tmpl = templater.parsestring(tmpl, quoted=False)
+    try:
+        t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
+    except SyntaxError, inst:
+        raise util.Abort(inst.args[0])
+    t.use_template(tmpl)
+    return t
+
+def changedlines(ui, repo, ctx1, ctx2, fns):
+    """Return (added, removed) line counts between ctx1 and ctx2 for
+    the files in fns, by counting +/- lines in the generated diff
+    (excluding the '+++'/'---' file header lines)."""
+    added, removed = 0, 0
+    fmatch = cmdutil.matchfiles(repo, fns)
+    diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
+    for l in diff.split('\n'):
+        if l.startswith("+") and not l.startswith("+++ "):
+            added += 1
+        elif l.startswith("-") and not l.startswith("--- "):
+            removed += 1
+    return (added, removed)
+
+def countrate(ui, repo, amap, *pats, **opts):
+    """Calculate stats.
+
+    Walks the revisions selected by pats/opts and returns a dict mapping
+    a group key (strftime of the commit date when --dateformat is given,
+    otherwise the rendered --template, default author email) to either a
+    (changeset_count, 0) pair (--changesets) or an [added, removed] line
+    count pair. *amap* remaps keys through the alias table.
+    """
+    if opts.get('dateformat'):
+        def getkey(ctx):
+            t, tz = ctx.date()
+            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
+            return date.strftime(opts['dateformat'])
+    else:
+        tmpl = opts.get('template', '{author|email}')
+        tmpl = maketemplater(ui, repo, tmpl)
+        def getkey(ctx):
+            # Render the template into a buffer and use the text as key.
+            ui.pushbuffer()
+            tmpl.show(ctx)
+            return ui.popbuffer()
+
+    state = {'count': 0, 'pct': 0}
+    rate = {}
+    df = False
+    if opts.get('date'):
+        df = util.matchdate(opts['date'])
+
+    m = cmdutil.match(repo, pats, opts)
+    def prep(ctx, fns):
+        rev = ctx.rev()
+        if df and not df(ctx.date()[0]): # doesn't match date format
+            return
+
+        key = getkey(ctx)
+        key = amap.get(key, key) # alias remap
+        if opts.get('changesets'):
+            rate[key] = (rate.get(key, (0,))[0] + 1, 0)
+        else:
+            # Line-count mode: skip merges, they have no single diff base.
+            parents = ctx.parents()
+            if len(parents) > 1:
+                ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
+                return
+
+            ctx1 = parents[0]
+            lines = changedlines(ui, repo, ctx1, ctx, fns)
+            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
+
+        if opts.get('progress'):
+            state['count'] += 1
+            newpct = int(100.0 * state['count'] / max(len(repo), 1))
+            if state['pct'] < newpct:
+                state['pct'] = newpct
+                ui.write("\r" + _("generating stats: %d%%") % state['pct'])
+                sys.stdout.flush()
+
+    # walkchangerevs drives prep() for each matching revision; the loop
+    # body itself has nothing to do.
+    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
+        continue
+
+    if opts.get('progress'):
+        ui.write("\r")
+        sys.stdout.flush()
+
+    return rate
+
+
+def churn(ui, repo, *pats, **opts):
+    '''histogram of changes to the repository
+
+    This command will display a histogram representing the number
+    of changed lines or revisions, grouped according to the given
+    template. The default template will group changes by author.
+    The --dateformat option may be used to group the results by
+    date instead.
+
+    Statistics are based on the number of changed lines, or
+    alternatively the number of matching revisions if the
+    --changesets option is specified.
+
+    Examples::
+
+      # display count of changed lines for every committer
+      hg churn -t '{author|email}'
+
+      # display daily activity graph
+      hg churn -f '%H' -s -c
+
+      # display activity of developers by month
+      hg churn -f '%Y-%m' -s -c
+
+      # display count of lines changed in every year
+      hg churn -f '%Y' -s
+
+    It is possible to map alternate email addresses to a main address
+    by providing a file using the following format::
+
+      <alias email> <actual email>
+
+    Such a file may be specified with the --aliases option, otherwise
+    a .hgchurn file will be looked for in the working directory root.
+    '''
+    # Pad or truncate s to exactly l characters.
+    def pad(s, l):
+        return (s + " " * l)[:l]
+
+    # Load email alias map: explicit --aliases file, else .hgchurn in
+    # the working directory root if present.
+    amap = {}
+    aliases = opts.get('aliases')
+    if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
+        aliases = repo.wjoin('.hgchurn')
+    if aliases:
+        for l in open(aliases, "r"):
+            l = l.strip()
+            alias, actual = l.split()
+            amap[alias] = actual
+
+    rate = countrate(ui, repo, amap, *pats, **opts).items()
+    if not rate:
+        return
+
+    # Default: descending by total count; with --sort: ascending by key.
+    sortkey = ((not opts.get('sort')) and (lambda x: -sum(x[1])) or None)
+    rate.sort(key=sortkey)
+
+    # Be careful not to have a zero maxcount (issue833)
+    maxcount = float(max(sum(v) for k, v in rate)) or 1.0
+    maxname = max(len(k) for k, v in rate)
+
+    ttywidth = util.termwidth()
+    ui.debug("assuming %i character terminal\n" % ttywidth)
+    # Budget: name column, separators, count column, then the bar graph.
+    width = ttywidth - maxname - 2 - 2 - 2
+
+    if opts.get('diffstat'):
+        width -= 15
+        # NOTE: tuple parameter unpacking in the signature is Python 2 only.
+        def format(name, (added, removed)):
+            return "%s %15s %s%s\n" % (pad(name, maxname),
+                                       '+%d/-%d' % (added, removed),
+                                       '+' * charnum(added),
+                                       '-' * charnum(removed))
+    else:
+        width -= 6
+        def format(name, count):
+            return "%s %6d %s\n" % (pad(name, maxname), sum(count),
+                                    '*' * charnum(sum(count)))
+
+    # Scale a count to a bar length within the remaining width.
+    def charnum(count):
+        return int(round(count * width / maxcount))
+
+    for name, count in rate:
+        ui.write(format(name, count))
+
+
+# Mercurial command table: command name -> (callback, option list, synopsis).
+cmdtable = {
+    "churn":
+        (churn,
+         [('r', 'rev', [], _('count rate for the specified revision or range')),
+          ('d', 'date', '', _('count rate for revisions matching date spec')),
+          ('t', 'template', '{author|email}',
+           _('template to group changesets')),
+          ('f', 'dateformat', '',
+              _('strftime-compatible format for grouping by date')),
+          ('c', 'changesets', False, _('count rate by number of changesets')),
+          ('s', 'sort', False, _('sort by key (default: sort by count)')),
+          ('', 'diffstat', False, _('display added/removed lines separately')),
+          ('', 'aliases', '', _('file with email aliases')),
+          ('', 'progress', None, _('show progress'))],
+         _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")),
+}
diff --git a/plugins/hg4idea/testData/bin/hgext/color.py b/plugins/hg4idea/testData/bin/hgext/color.py
new file mode 100644 (file)
index 0000000..6e41489
--- /dev/null
@@ -0,0 +1,374 @@
+# color.py color output for the status and qseries commands
+#
+# Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+'''colorize output from some commands
+
+This extension modifies the status and resolve commands to add color to their
+output to reflect file status, the qseries command to add color to reflect
+patch status (applied, unapplied, missing), and to diff-related
+commands to highlight additions, removals, diff headers, and trailing
+whitespace.
+
+Other effects in addition to color, like bold and underlined text, are
+also available. Effects are rendered with the ECMA-48 SGR control
+function (aka ANSI escape codes). This module also provides the
+render_text function, which can be used to add effects to any text.
+
+Default effects may be overridden from the .hgrc file::
+
+  [color]
+  status.modified = blue bold underline red_background
+  status.added = green bold
+  status.removed = red bold blue_background
+  status.deleted = cyan bold underline
+  status.unknown = magenta bold underline
+  status.ignored = black bold
+
+  # 'none' turns off all effects
+  status.clean = none
+  status.copied = none
+
+  qseries.applied = blue bold underline
+  qseries.unapplied = black bold
+  qseries.missing = red bold
+
+  diff.diffline = bold
+  diff.extended = cyan bold
+  diff.file_a = red bold
+  diff.file_b = green bold
+  diff.hunk = magenta
+  diff.deleted = red
+  diff.inserted = green
+  diff.changed = white
+  diff.trailingwhitespace = bold red_background
+
+  resolve.unresolved = red bold
+  resolve.resolved = green bold
+
+  bookmarks.current = green
+'''
+
+import os, sys
+
+from mercurial import cmdutil, commands, extensions
+from mercurial.i18n import _
+
+# start and stop parameters for effects
+_effect_params = {'none': 0,
+                  'black': 30,
+                  'red': 31,
+                  'green': 32,
+                  'yellow': 33,
+                  'blue': 34,
+                  'magenta': 35,
+                  'cyan': 36,
+                  'white': 37,
+                  'bold': 1,
+                  'italic': 3,
+                  'underline': 4,
+                  'inverse': 7,
+                  'black_background': 40,
+                  'red_background': 41,
+                  'green_background': 42,
+                  'yellow_background': 43,
+                  'blue_background': 44,
+                  'purple_background': 45,
+                  'cyan_background': 46,
+                  'white_background': 47}
+
+def render_effects(text, effects):
+    'Wrap text in commands to turn on each effect.'
+    start = [str(_effect_params[e]) for e in ['none'] + effects]
+    start = '\033[' + ';'.join(start) + 'm'
+    stop = '\033[' + str(_effect_params['none']) + 'm'
+    return ''.join([start, text, stop])
+
+def _colorstatuslike(abbreviations, effectdefs, orig, ui, repo, *pats, **opts):
+    '''run a status-like command with colorized output'''
+    delimiter = opts.get('print0') and '\0' or '\n'
+
+    # Force status codes on so lines can be classified, then strip them
+    # again below if the user asked for --no-status.
+    nostatus = opts.get('no_status')
+    opts['no_status'] = False
+    # run original command and capture its output
+    ui.pushbuffer()
+    retval = orig(ui, repo, *pats, **opts)
+    # filter out empty strings
+    lines_with_status = [line for line in ui.popbuffer().split(delimiter) if line]
+
+    if nostatus:
+        lines = [l[2:] for l in lines_with_status]
+    else:
+        lines = lines_with_status
+
+    # apply color to output and display it
+    for i in xrange(len(lines)):
+        try:
+            status = abbreviations[lines_with_status[i][0]]
+        except KeyError:
+            # Ignore lines with invalid codes, especially in the case
+            # of unknown filenames containing newlines (issue2036).
+            pass
+        else:
+            effects = effectdefs[status]
+            if effects:
+                lines[i] = render_effects(lines[i], effects)
+        ui.write(lines[i] + delimiter)
+    return retval
+
+
+# Map 'hg status' code letters to state names; ' ' marks the source line
+# printed for a copy.
+_status_abbreviations = { 'M': 'modified',
+                          'A': 'added',
+                          'R': 'removed',
+                          '!': 'deleted',
+                          '?': 'unknown',
+                          'I': 'ignored',
+                          'C': 'clean',
+                          ' ': 'copied', }
+
+# Default effects per state; overridable from the [color] hgrc section.
+_status_effects = { 'modified': ['blue', 'bold'],
+                    'added': ['green', 'bold'],
+                    'removed': ['red', 'bold'],
+                    'deleted': ['cyan', 'bold', 'underline'],
+                    'unknown': ['magenta', 'bold', 'underline'],
+                    'ignored': ['black', 'bold'],
+                    'clean': ['none'],
+                    'copied': ['none'], }
+
+def colorstatus(orig, ui, repo, *pats, **opts):
+    '''run the status command with colored output'''
+    return _colorstatuslike(_status_abbreviations, _status_effects,
+                            orig, ui, repo, *pats, **opts)
+
+
+# Map 'hg resolve -l' code letters to state names and states to effects.
+_resolve_abbreviations = { 'U': 'unresolved',
+                           'R': 'resolved', }
+
+_resolve_effects = { 'unresolved': ['red', 'bold'],
+                     'resolved': ['green', 'bold'], }
+
+def colorresolve(orig, ui, repo, *pats, **opts):
+    '''run the resolve command with colored output'''
+    if not opts.get('list'):
+        # only colorize for resolve -l
+        return orig(ui, repo, *pats, **opts)
+    return _colorstatuslike(_resolve_abbreviations, _resolve_effects,
+                            orig, ui, repo, *pats, **opts)
+
+
+_bookmark_effects = { 'current': ['green'] }
+
+def colorbookmarks(orig, ui, repo, *pats, **opts):
+    '''run the bookmarks command, highlighting the current bookmark'''
+    # Intercept ui.write while the wrapped command runs and colorize
+    # the line marked " *" (the current bookmark).
+    def colorize(orig, s):
+        lines = s.split('\n')
+        for i, line in enumerate(lines):
+            if line.startswith(" *"):
+                lines[i] = render_effects(line, _bookmark_effects['current'])
+        orig('\n'.join(lines))
+    oldwrite = extensions.wrapfunction(ui, 'write', colorize)
+    try:
+        orig(ui, repo, *pats, **opts)
+    finally:
+        # Always restore the original write, even if the command raised.
+        ui.write = oldwrite
+
def colorqseries(orig, ui, repo, *dummy, **opts):
    '''run the qseries command with colored output'''
    # capture the plain qseries output so each line can be re-emitted
    # with the effect matching its patch's state
    ui.pushbuffer()
    retval = orig(ui, repo, **opts)
    patchlines = ui.popbuffer().splitlines()
    patchnames = repo.mq.series

    # output lines and mq.series are assumed to be in the same order
    for patch, patchname in zip(patchlines, patchnames):
        if opts['missing']:
            effects = _patch_effects['missing']
        # Determine if patch is applied.
        elif [applied for applied in repo.mq.applied
               if patchname == applied.name]:
            effects = _patch_effects['applied']
        else:
            effects = _patch_effects['unapplied']

        # colorize only the first occurrence of the patch name, leaving
        # any surrounding text (e.g. -v index/status columns) untouched
        patch = patch.replace(patchname, render_effects(patchname, effects), 1)
        ui.write(patch + '\n')
    return retval
+
# terminal effects applied to qseries patch names by state
_patch_effects = {
    'applied': ['blue', 'bold', 'underline'],
    'missing': ['red', 'bold'],
    'unapplied': ['black', 'bold'],
}
def colorwrap(orig, *args):
    '''wrap ui.write for colored diff output'''
    def _colorize(s):
        lines = s.split('\n')
        for i, line in enumerate(lines):
            stripline = line
            if line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                stripline = line.rstrip()
            # first matching prefix in _diff_prefixes wins
            for prefix, style in _diff_prefixes:
                if stripline.startswith(prefix):
                    lines[i] = render_effects(stripline, _diff_effects[style])
                    break
            if line != stripline:
                # re-attach the stripped trailing whitespace, highlighted
                lines[i] += render_effects(
                    line[len(stripline):], _diff_effects['trailingwhitespace'])
        return '\n'.join(lines)
    orig(*[_colorize(s) for s in args])
+
def colorshowpatch(orig, self, node):
    '''wrap cmdutil.changeset_printer.showpatch with colored output'''
    saved = extensions.wrapfunction(self.ui, 'write', colorwrap)
    try:
        orig(self, node)
    finally:
        # restore the unwrapped write even if showpatch raised
        self.ui.write = saved
+
def colordiffstat(orig, s):
    '''colorize the +/- histogram in diffstat output before writing it'''
    lines = s.split('\n')
    for i, line in enumerate(lines):
        # histogram lines end in '+' or '-'; split off the trailing graph
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            # NOTE(review): '-' is replaced before '+'; this assumes
            # render_effects output contains no '+'/'-' characters, so the
            # second replace cannot corrupt the inserted escape codes
            graph = graph.replace('-',
                        render_effects('-', _diff_effects['deleted']))
            graph = graph.replace('+',
                        render_effects('+', _diff_effects['inserted']))
            lines[i] = ' '.join([name, graph])
    orig('\n'.join(lines))
+
def colordiff(orig, ui, repo, *pats, **opts):
    '''run the diff command with colored output'''
    # --stat output is a histogram and needs its own colorizer
    if opts.get('stat'):
        colorizer = colordiffstat
    else:
        colorizer = colorwrap
    saved = extensions.wrapfunction(ui, 'write', colorizer)
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        ui.write = saved
+
def colorchurn(orig, ui, repo, *pats, **opts):
    '''run the churn command with colored output'''
    if not opts.get('diffstat'):
        # only the --diffstat histogram gets colorized
        return orig(ui, repo, *pats, **opts)
    saved = extensions.wrapfunction(ui, 'write', colordiffstat)
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        ui.write = saved
+
# (prefix, style) pairs scanned in order by colorwrap; order matters:
# the multi-character prefixes ('---', '+++') must be tested before the
# single-character '-'/'+' fallbacks, and 'diff' before anything else
_diff_prefixes = [('diff', 'diffline'),
                  ('copy', 'extended'),
                  ('rename', 'extended'),
                  ('old', 'extended'),
                  ('new', 'extended'),
                  ('deleted', 'extended'),
                  ('---', 'file_a'),
                  ('+++', 'file_b'),
                  ('@', 'hunk'),
                  ('-', 'deleted'),
                  ('+', 'inserted')]
+
# terminal effects for each diff line style named in _diff_prefixes
_diff_effects = {
    'diffline': ['bold'],
    'extended': ['cyan', 'bold'],
    'file_a': ['red', 'bold'],
    'file_b': ['green', 'bold'],
    'hunk': ['magenta'],
    'deleted': ['red'],
    'inserted': ['green'],
    'changed': ['white'],
    'trailingwhitespace': ['bold', 'red_background'],
}
+
def extsetup(ui):
    '''Initialize the extension.'''
    # core commands whose output gets colorized
    _setupcmd(ui, 'diff', commands.table, colordiff, _diff_effects)
    _setupcmd(ui, 'incoming', commands.table, None, _diff_effects)
    _setupcmd(ui, 'log', commands.table, None, _diff_effects)
    _setupcmd(ui, 'outgoing', commands.table, None, _diff_effects)
    _setupcmd(ui, 'tip', commands.table, None, _diff_effects)
    _setupcmd(ui, 'status', commands.table, colorstatus, _status_effects)
    _setupcmd(ui, 'resolve', commands.table, colorresolve, _resolve_effects)

    # optional extensions: extensions.find raises KeyError when one is
    # not enabled, in which case its commands stay uncolored
    try:
        mq = extensions.find('mq')
        _setupcmd(ui, 'qdiff', mq.cmdtable, colordiff, _diff_effects)
        _setupcmd(ui, 'qseries', mq.cmdtable, colorqseries, _patch_effects)
    except KeyError:
        mq = None

    try:
        rec = extensions.find('record')
        _setupcmd(ui, 'record', rec.cmdtable, colordiff, _diff_effects)
    except KeyError:
        rec = None

    # qrecord only exists when both mq and record are loaded
    if mq and rec:
        _setupcmd(ui, 'qrecord', rec.cmdtable, colordiff, _diff_effects)
    try:
        churn = extensions.find('churn')
        _setupcmd(ui, 'churn', churn.cmdtable, colorchurn, _diff_effects)
    except KeyError:
        churn = None

    try:
        bookmarks = extensions.find('bookmarks')
        _setupcmd(ui, 'bookmarks', bookmarks.cmdtable, colorbookmarks,
                  _bookmark_effects)
    except KeyError:
        # The bookmarks extension is not enabled
        pass
+
def _setupcmd(ui, cmd, table, func, effectsmap):
    '''patch in command to command table and load effect map'''
    # func is the color-aware replacement for cmd, or None to colorize
    # only the patch text printed via changeset_printer.showpatch
    def nocolor(orig, *args, **opts):

        if (opts['no_color'] or opts['color'] == 'never' or
            (opts['color'] == 'auto' and (os.environ.get('TERM') == 'dumb'
                                          or not sys.__stdout__.isatty()))):
            # color disabled: strip our extra options and run unmodified
            del opts['no_color']
            del opts['color']
            return orig(*args, **opts)

        oldshowpatch = extensions.wrapfunction(cmdutil.changeset_printer,
                                               'showpatch', colorshowpatch)
        # the wrapped command must not see the color-specific options
        del opts['no_color']
        del opts['color']
        try:
            if func is not None:
                return func(orig, *args, **opts)
            return orig(*args, **opts)
        finally:
            # always restore the original showpatch, even on error
            cmdutil.changeset_printer.showpatch = oldshowpatch

    entry = extensions.wrapcommand(table, cmd, nocolor)
    entry[1].extend([
        ('', 'color', 'auto', _("when to colorize (always, auto, or never)")),
        ('', 'no-color', None, _("don't colorize output (DEPRECATED)")),
    ])

    # let [color] config entries like "color.<cmd>.<status>" override the
    # built-in effects, dropping any effect name not in _effect_params
    for status in effectsmap:
        configkey = cmd + '.' + status
        effects = ui.configlist('color', configkey)
        if effects:
            good = []
            for e in effects:
                if e in _effect_params:
                    good.append(e)
                else:
                    ui.warn(_("ignoring unknown color/effect %r "
                              "(configured in color.%s)\n")
                            % (e, configkey))
            effectsmap[status] = good
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/__init__.py b/plugins/hg4idea/testData/bin/hgext/convert/__init__.py
new file mode 100644 (file)
index 0000000..a11fbb0
--- /dev/null
@@ -0,0 +1,295 @@
+# convert.py Foreign SCM converter
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''import revisions from foreign VCS repositories into Mercurial'''
+
+import convcmd
+import cvsps
+import subversion
+from mercurial import commands
+from mercurial.i18n import _
+
+# Commands definition was moved elsewhere to ease demandload job.
+
def convert(ui, src, dest=None, revmapfile=None, **opts):
    """convert a foreign SCM repository to a Mercurial one.

    Accepted source formats [identifiers]:

    - Mercurial [hg]
    - CVS [cvs]
    - Darcs [darcs]
    - git [git]
    - Subversion [svn]
    - Monotone [mtn]
    - GNU Arch [gnuarch]
    - Bazaar [bzr]
    - Perforce [p4]

    Accepted destination formats [identifiers]:

    - Mercurial [hg]
    - Subversion [svn] (history on branches is not preserved)

    If no revision is given, all revisions will be converted.
    Otherwise, convert will only import up to the named revision
    (given in a format understood by the source).

    If no destination directory name is specified, it defaults to the
    basename of the source with '-hg' appended. If the destination
    repository doesn't exist, it will be created.

    By default, all sources except Mercurial will use --branchsort.
    Mercurial uses --sourcesort to preserve original revision numbers
    order. Sort modes have the following effects:

    --branchsort  convert from parent to child revision when possible,
                  which means branches are usually converted one after
                  the other. It generates more compact repositories.

    --datesort    sort revisions by date. Converted repositories have
                  good-looking changelogs but are often an order of
                  magnitude larger than the same ones generated by
                  --branchsort.

    --sourcesort  try to preserve source revisions order, only
                  supported by Mercurial sources.

    If <REVMAP> isn't given, it will be put in a default location
    (<dest>/.hg/shamap by default). The <REVMAP> is a simple text file
    that maps each source commit ID to the destination ID for that
    revision, like so::

      <source ID> <destination ID>

    If the file doesn't exist, it's automatically created. It's
    updated on each commit copied, so convert-repo can be interrupted
    and can be run repeatedly to copy new commits.

    The [username mapping] file is a simple text file that maps each
    source commit author to a destination commit author. It is handy
    for source SCMs that use unix logins to identify authors (eg:
    CVS). One line per author mapping and the line format is:
    srcauthor=whatever string you want

    The filemap is a file that allows filtering and remapping of files
    and directories. Comment lines start with '#'. Each line can
    contain one of the following directives::

      include path/to/file

      exclude path/to/file

      rename from/file to/file

    The 'include' directive causes a file, or all files under a
    directory, to be included in the destination repository, and the
    exclusion of all other files and directories not explicitly
    included. The 'exclude' directive causes files or directories to
    be omitted. The 'rename' directive renames a file or directory. To
    rename from a subdirectory into the root of the repository, use
    '.' as the path to rename to.

    The splicemap is a file that allows insertion of synthetic
    history, letting you specify the parents of a revision. This is
    useful if you want to e.g. give a Subversion merge two parents, or
    graft two disconnected series of history together. Each entry
    contains a key, followed by a space, followed by one or two
    comma-separated values. The key is the revision ID in the source
    revision control system whose parents should be modified (same
    format as a key in .hg/shamap). The values are the revision IDs
    (in either the source or destination revision control system) that
    should be used as the new parents for that node. For example, if
    you have merged "release-1.0" into "trunk", then you should
    specify the revision on "trunk" as the first parent and the one on
    the "release-1.0" branch as the second.

    The branchmap is a file that allows you to rename a branch when it is
    being brought in from whatever external repository. When used in
    conjunction with a splicemap, it allows for a powerful combination
    to help fix even the most badly mismanaged repositories and turn them
    into nicely structured Mercurial repositories. The branchmap contains
    lines of the form "original_branch_name new_branch_name".
    "original_branch_name" is the name of the branch in the source
    repository, and "new_branch_name" is the name of the branch in the
    destination repository. This can be used to (for instance) move code
    in one repository from "default" to a named branch.

    Mercurial Source
    ----------------

    --config convert.hg.ignoreerrors=False    (boolean)
        ignore integrity errors when reading. Use it to fix Mercurial
        repositories with missing revlogs, by converting from and to
        Mercurial.
    --config convert.hg.saverev=False         (boolean)
        store original revision ID in changeset (forces target IDs to
        change)
    --config convert.hg.startrev=0            (hg revision identifier)
        convert start revision and its descendants

    CVS Source
    ----------

    CVS source will use a sandbox (i.e. a checked-out copy) from CVS
    to indicate the starting point of what will be converted. Direct
    access to the repository files is not needed, unless of course the
    repository is :local:. The conversion uses the top level directory
    in the sandbox to find the CVS repository, and then uses CVS rlog
    commands to find files to convert. This means that unless a
    filemap is given, all files under the starting directory will be
    converted, and that any directory reorganization in the CVS
    sandbox is ignored.

    The options shown are the defaults.

    --config convert.cvsps.cache=True         (boolean)
        Set to False to disable remote log caching, for testing and
        debugging purposes.
    --config convert.cvsps.fuzz=60            (integer)
        Specify the maximum time (in seconds) that is allowed between
        commits with identical user and log message in a single
        changeset. When very large files were checked in as part of a
        changeset then the default may not be long enough.
    --config convert.cvsps.mergeto='{{mergetobranch ([-\\w]+)}}'
        Specify a regular expression to which commit log messages are
        matched. If a match occurs, then the conversion process will
        insert a dummy revision merging the branch on which this log
        message occurs to the branch indicated in the regex.
    --config convert.cvsps.mergefrom='{{mergefrombranch ([-\\w]+)}}'
        Specify a regular expression to which commit log messages are
        matched. If a match occurs, then the conversion process will
        add the most recent revision on the branch indicated in the
        regex as the second parent of the changeset.
    --config hook.cvslog
        Specify a Python function to be called at the end of gathering
        the CVS log. The function is passed a list with the log entries,
        and can modify the entries in-place, or add or delete them.
    --config hook.cvschangesets
        Specify a Python function to be called after the changesets
        are calculated from the CVS log. The function is passed
        a list with the changeset entries, and can modify the changesets
        in-place, or add or delete them.

    An additional "debugcvsps" Mercurial command allows the builtin
    changeset merging code to be run without doing a conversion. Its
    parameters and output are similar to that of cvsps 2.1. Please see
    the command help for more details.

    Subversion Source
    -----------------

    Subversion source detects classical trunk/branches/tags layouts.
    By default, the supplied "svn://repo/path/" source URL is
    converted as a single branch. If "svn://repo/path/trunk" exists it
    replaces the default branch. If "svn://repo/path/branches" exists,
    its subdirectories are listed as possible branches. If
    "svn://repo/path/tags" exists, it is looked for tags referencing
    converted branches. Default "trunk", "branches" and "tags" values
    can be overridden with following options. Set them to paths
    relative to the source URL, or leave them blank to disable auto
    detection.

    --config convert.svn.branches=branches    (directory name)
        specify the directory containing branches
    --config convert.svn.tags=tags            (directory name)
        specify the directory containing tags
    --config convert.svn.trunk=trunk          (directory name)
        specify the name of the trunk branch

    Source history can be retrieved starting at a specific revision,
    instead of being integrally converted. Only single branch
    conversions are supported.

    --config convert.svn.startrev=0           (svn revision number)
        specify start Subversion revision.

    Perforce Source
    ---------------

    The Perforce (P4) importer can be given a p4 depot path or a
    client specification as source. It will convert all files in the
    source to a flat Mercurial repository, ignoring labels, branches
    and integrations. Note that when a depot path is given you then
    usually should specify a target directory, because otherwise the
    target may be named ...-hg.

    It is possible to limit the amount of source history to be
    converted by specifying an initial Perforce revision.

    --config convert.p4.startrev=0            (perforce changelist number)
        specify initial Perforce revision.

    Mercurial Destination
    ---------------------

    --config convert.hg.clonebranches=False   (boolean)
        dispatch source branches in separate clones.
    --config convert.hg.tagsbranch=default    (branch name)
        tag revisions branch name
    --config convert.hg.usebranchnames=True   (boolean)
        preserve branch names

    """
    return convcmd.convert(ui, src, dest, revmapfile, **opts)
+
def debugsvnlog(ui, **opts):
    '''delegate to the subversion module's debugsvnlog implementation'''
    return subversion.debugsvnlog(ui, **opts)
+
def debugcvsps(ui, *args, **opts):
    '''create changeset information from CVS

    This command is intended as a debugging tool for the CVS to
    Mercurial converter, and can be used as a direct replacement for
    cvsps.

    Hg debugcvsps reads the CVS rlog for current directory (or any
    named directory) in the CVS repository, and converts the log to a
    series of changesets based on matching commit log entries and
    dates.'''
    # all real work happens in the cvsps module
    return cvsps.debugcvsps(ui, *args, **opts)
+
# register these commands as runnable without a pre-existing local repository
commands.norepo += " convert debugsvnlog debugcvsps"
+
# command table: maps each command name to a (function, options, synopsis)
# tuple in the standard Mercurial cmdtable format
cmdtable = {
    "convert":
        (convert,
         [('A', 'authors', '', _('username mapping filename')),
          ('d', 'dest-type', '', _('destination repository type')),
          ('', 'filemap', '', _('remap file names using contents of file')),
          ('r', 'rev', '', _('import up to target revision REV')),
          ('s', 'source-type', '', _('source repository type')),
          ('', 'splicemap', '', _('splice synthesized history into place')),
          ('', 'branchmap', '', _('change branch names while converting')),
          ('', 'branchsort', None, _('try to sort changesets by branches')),
          ('', 'datesort', None, _('try to sort changesets by date')),
          ('', 'sourcesort', None, _('preserve source changesets order'))],
         _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
    "debugsvnlog":
        (debugsvnlog,
         [],
         'hg debugsvnlog'),
    "debugcvsps":
        (debugcvsps,
         [
          # Main options shared with cvsps-2.1
          ('b', 'branches', [], _('only return changes on specified branches')),
          ('p', 'prefix', '', _('prefix to remove from file names')),
          ('r', 'revisions', [],
           _('only return changes after or between specified tags')),
          ('u', 'update-cache', None, _("update cvs log cache")),
          ('x', 'new-cache', None, _("create new cvs log cache")),
          ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
          ('', 'root', '', _('specify cvsroot')),
          # Options specific to builtin cvsps
          ('', 'parents', '', _('show parent changesets')),
          ('', 'ancestors', '', _('show current changeset in ancestor branches')),
          # Options that are ignored for compatibility with cvsps-2.1
          ('A', 'cvs-direct', None, _('ignored for compatibility')),
         ],
         _('hg debugcvsps [OPTION]... [PATH]...')),
}
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/bzr.py b/plugins/hg4idea/testData/bin/hgext/convert/bzr.py
new file mode 100644 (file)
index 0000000..7434d31
--- /dev/null
@@ -0,0 +1,262 @@
+# bzr.py - bzr support for the convert extension
+#
+#  Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# This module is for handling 'bzr', that was formerly known as Bazaar-NG;
+# it cannot access 'bar' repositories, but they were never used very much
+
+import os
+from mercurial import demandimport
+# these do not work with demandimport, blacklist
+demandimport.ignore.extend([
+        'bzrlib.transactions',
+        'bzrlib.urlutils',
+        'ElementPath',
+    ])
+
+from mercurial.i18n import _
+from mercurial import util
+from common import NoRepo, commit, converter_source
+
+try:
+    # bazaar imports
+    from bzrlib import branch, revision, errors
+    from bzrlib.revisionspec import RevisionSpec
+except ImportError:
+    pass
+
# bzr inventory kinds whose contents getfile() can return; anything else
# (e.g. a removed entry) makes getfile() raise IOError
supportedkinds = ('file', 'symlink')
+
+class bzr_source(converter_source):
+    """Reads Bazaar repositories by using the Bazaar Python libraries"""
+
+    def __init__(self, ui, path, rev=None):
+        super(bzr_source, self).__init__(ui, path, rev=rev)
+
+        if not os.path.exists(os.path.join(path, '.bzr')):
+            raise NoRepo(_('%s does not look like a Bazaar repository')
+                         % path)
+
+        try:
+            # access bzrlib stuff
+            branch
+        except NameError:
+            raise NoRepo(_('Bazaar modules could not be loaded'))
+
+        path = os.path.abspath(path)
+        self._checkrepotype(path)
+        self.branch = branch.Branch.open(path)
+        self.sourcerepo = self.branch.repository
+        self._parentids = {}
+
+    def _checkrepotype(self, path):
+        # Lightweight checkouts detection is informational but probably
+        # fragile at API level. It should not terminate the conversion.
+        try:
+            from bzrlib import bzrdir
+            dir = bzrdir.BzrDir.open_containing(path)[0]
+            try:
+                tree = dir.open_workingtree(recommend_upgrade=False)
+                branch = tree.branch
+            except (errors.NoWorkingTree, errors.NotLocalUrl), e:
+                tree = None
+                branch = dir.open_branch()
+            if (tree is not None and tree.bzrdir.root_transport.base !=
+                branch.bzrdir.root_transport.base):
+                self.ui.warn(_('warning: lightweight checkouts may cause '
+                               'conversion failures, try with a regular '
+                               'branch instead.\n'))
+        except:
+            self.ui.note(_('bzr source type could not be determined\n'))
+
+    def before(self):
+        """Before the conversion begins, acquire a read lock
+        for all the operations that might need it. Fortunately
+        read locks don't block other reads or writes to the
+        repository, so this shouldn't have any impact on the usage of
+        the source repository.
+
+        The alternative would be locking on every operation that
+        needs locks (there are currently two: getting the file and
+        getting the parent map) and releasing immediately after,
+        but this approach can take even 40% longer."""
+        self.sourcerepo.lock_read()
+
+    def after(self):
+        self.sourcerepo.unlock()
+
+    def getheads(self):
+        if not self.rev:
+            return [self.branch.last_revision()]
+        try:
+            r = RevisionSpec.from_string(self.rev)
+            info = r.in_history(self.branch)
+        except errors.BzrError:
+            raise util.Abort(_('%s is not a valid revision in current branch')
+                             % self.rev)
+        return [info.rev_id]
+
+    def getfile(self, name, rev):
+        revtree = self.sourcerepo.revision_tree(rev)
+        fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
+        kind = None
+        if fileid is not None:
+            kind = revtree.kind(fileid)
+        if kind not in supportedkinds:
+            # the file is not available anymore - was deleted
+            raise IOError(_('%s is not available in %s anymore') %
+                    (name, rev))
+        if kind == 'symlink':
+            target = revtree.get_symlink_target(fileid)
+            if target is None:
+                raise util.Abort(_('%s.%s symlink has no target')
+                                 % (name, rev))
+            return target
+        else:
+            sio = revtree.get_file(fileid)
+            return sio.read()
+
+    def getmode(self, name, rev):
+        return self._modecache[(name, rev)]
+
+    def getchanges(self, version):
+        # set up caches: modecache and revtree
+        self._modecache = {}
+        self._revtree = self.sourcerepo.revision_tree(version)
+        # get the parentids from the cache
+        parentids = self._parentids.pop(version)
+        # only diff against first parent id
+        prevtree = self.sourcerepo.revision_tree(parentids[0])
+        return self._gettreechanges(self._revtree, prevtree)
+
+    def getcommit(self, version):
+        rev = self.sourcerepo.get_revision(version)
+        # populate parent id cache
+        if not rev.parent_ids:
+            parents = []
+            self._parentids[version] = (revision.NULL_REVISION,)
+        else:
+            parents = self._filterghosts(rev.parent_ids)
+            self._parentids[version] = parents
+
+        return commit(parents=parents,
+                date='%d %d' % (rev.timestamp, -rev.timezone),
+                author=self.recode(rev.committer),
+                # bzr returns bytestrings or unicode, depending on the content
+                desc=self.recode(rev.message),
+                rev=version)
+
+    def gettags(self):
+        if not self.branch.supports_tags():
+            return {}
+        tagdict = self.branch.tags.get_tag_dict()
+        bytetags = {}
+        for name, rev in tagdict.iteritems():
+            bytetags[self.recode(name)] = rev
+        return bytetags
+
+    def getchangedfiles(self, rev, i):
+        self._modecache = {}
+        curtree = self.sourcerepo.revision_tree(rev)
+        if i is not None:
+            parentid = self._parentids[rev][i]
+        else:
+            # no parent id, get the empty revision
+            parentid = revision.NULL_REVISION
+
+        prevtree = self.sourcerepo.revision_tree(parentid)
+        changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
+        return changes
+
+    def _gettreechanges(self, current, origin):
+        revid = current._revision_id
+        changes = []
+        renames = {}
+        for (fileid, paths, changed_content, versioned, parent, name,
+            kind, executable) in current.iter_changes(origin):
+
+            if paths[0] == u'' or paths[1] == u'':
+                # ignore changes to tree root
+                continue
+
+            # bazaar tracks directories, mercurial does not, so
+            # we have to rename the directory contents
+            if kind[1] == 'directory':
+                if kind[0] not in (None, 'directory'):
+                    # Replacing 'something' with a directory, record it
+                    # so it can be removed.
+                    changes.append((self.recode(paths[0]), revid))
+
+                if None not in paths and paths[0] != paths[1]:
+                    # neither an add nor an delete - a move
+                    # rename all directory contents manually
+                    subdir = origin.inventory.path2id(paths[0])
+                    # get all child-entries of the directory
+                    for name, entry in origin.inventory.iter_entries(subdir):
+                        # hg does not track directory renames
+                        if entry.kind == 'directory':
+                            continue
+                        frompath = self.recode(paths[0] + '/' + name)
+                        topath = self.recode(paths[1] + '/' + name)
+                        # register the files as changed
+                        changes.append((frompath, revid))
+                        changes.append((topath, revid))
+                        # add to mode cache
+                        mode = ((entry.executable and 'x')
+                                or (entry.kind == 'symlink' and 's')
+                                or '')
+                        self._modecache[(topath, revid)] = mode
+                        # register the change as move
+                        renames[topath] = frompath
+
+                # no futher changes, go to the next change
+                continue
+
+            # we got unicode paths, need to convert them
+            path, topath = [self.recode(part) for part in paths]
+
+            if topath is None:
+                # file deleted
+                changes.append((path, revid))
+                continue
+
+            # renamed
+            if path and path != topath:
+                renames[topath] = path
+                changes.append((path, revid))
+
+            # populate the mode cache
+            kind, executable = [e[1] for e in (kind, executable)]
+            mode = ((executable and 'x') or (kind == 'symlink' and 'l')
+                    or '')
+            self._modecache[(topath, revid)] = mode
+            changes.append((topath, revid))
+
+        return changes, renames
+
+    def _filterghosts(self, ids):
+        """Filters out ghost revisions which hg does not support, see
+        <http://bazaar-vcs.org/GhostRevision>
+        """
+        parentmap = self.sourcerepo.get_parent_map(ids)
+        parents = tuple([parent for parent in ids if parent in parentmap])
+        return parents
+
+    def recode(self, s, encoding=None):
+        """This version of recode tries to encode unicode to bytecode,
+        and preferably using the UTF-8 codec.
+        Other types than Unicode are silently returned, this is by
+        intention, e.g. the None-type is not going to be encoded but instead
+        just passed through
+        """
+        if not encoding:
+            encoding = self.encoding or 'utf-8'
+
+        if isinstance(s, unicode):
+            return s.encode(encoding)
+        else:
+            # leave it alone
+            return s
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/common.py b/plugins/hg4idea/testData/bin/hgext/convert/common.py
new file mode 100644 (file)
index 0000000..3c57578
--- /dev/null
@@ -0,0 +1,394 @@
+# common.py - common code for the convert extension
+#
+#  Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import base64, errno
+import os
+import cPickle as pickle
+from mercurial import util
+from mercurial.i18n import _
+
def encodeargs(args):
    """Pickle *args* and return it as a single-line base64 string.

    The result contains no newlines, so it is safe to pass through an
    environment variable or on a command line; decodeargs() reverses it.
    """
    s = pickle.dumps(args)
    # b64encode emits the same encoding as the old encodestring() but
    # without the newline every 76 characters, which the original code
    # stripped with an obscure per-character splitlines() pass.
    return base64.b64encode(s)
+
def decodeargs(s):
    """Reverse encodeargs(): base64-decode *s* and unpickle the result.

    NOTE: pickle.loads() must only ever see data produced by our own
    encodeargs() — unpickling untrusted input is unsafe.
    """
    # b64decode is the canonical spelling; decodestring() is just a
    # legacy alias for the same binascii.a2b_base64 call.
    s = base64.b64decode(s)
    return pickle.loads(s)
+
class MissingTool(Exception):
    """Raised when a required external conversion tool is unavailable."""
+
def checktool(exe, name=None, abort=True):
    """Ensure the external program *exe* can be found on the PATH.

    name: display name used in the error message (defaults to exe).
    abort: if True, raise util.Abort (fatal); otherwise raise
    MissingTool so the caller can try another converter.
    """
    name = name or exe
    if not util.find_exe(exe):
        # conditional expression instead of the old 'and/or' idiom
        exc = util.Abort if abort else MissingTool
        raise exc(_('cannot find required "%s" tool') % name)
+
class NoRepo(Exception):
    """Raised when a path is not a valid repository for a converter."""
+
# Sentinel revision id recorded in the revision map for source revisions
# that were deliberately skipped and have no counterpart in the sink.
SKIPREV = 'SKIP'
+
class commit(object):
    """Container for a single source-repository changeset.

    author/date get safe fallbacks ('unknown' / '0 0') when the source
    could not provide them; 'extra' carries converter-specific metadata
    and 'sortkey' is used by sourcesort mode.
    """
    def __init__(self, author, date, desc, parents, branch=None, rev=None,
                 extra=None, sortkey=None):
        self.author = author or 'unknown'
        self.date = date or '0 0'
        self.desc = desc
        self.parents = parents
        self.branch = branch
        self.rev = rev
        # 'extra' used to default to a shared mutable dict ({} in the
        # signature); use a None sentinel so each commit gets its own.
        self.extra = extra if extra is not None else {}
        self.sortkey = sortkey
+
class converter_source(object):
    """Conversion source interface"""

    def __init__(self, ui, path=None, rev=None):
        """Initialize conversion source (or raise NoRepo("message")
        exception if path is not a valid repository)"""
        self.ui = ui
        self.path = path
        self.rev = rev

        # Default output encoding for recode(); subclasses may override
        # (e.g. CVS uses the locale's preferred encoding).
        self.encoding = 'utf-8'

    def before(self):
        """Hook called before the conversion starts."""
        pass

    def after(self):
        """Hook called after the conversion finishes."""
        pass

    def setrevmap(self, revmap):
        """set the map of already-converted revisions"""
        pass

    def getheads(self):
        """Return a list of this repository's heads"""
        raise NotImplementedError()

    def getfile(self, name, rev):
        """Return file contents as a string. rev is the identifier returned
        by a previous call to getchanges(). Raise IOError to indicate that
        name was deleted in rev.
        """
        raise NotImplementedError()

    def getmode(self, name, rev):
        """Return file mode, eg. '', 'x', or 'l'. rev is the identifier
        returned by a previous call to getchanges().
        """
        raise NotImplementedError()

    def getchanges(self, version):
        """Returns a tuple of (files, copies).

        files is a sorted list of (filename, id) tuples for all files
        changed between version and its first parent returned by
        getcommit(). id is the source revision id of the file.

        copies is a dictionary of dest: source
        """
        raise NotImplementedError()

    def getcommit(self, version):
        """Return the commit object for version"""
        raise NotImplementedError()

    def gettags(self):
        """Return the tags as a dictionary of name: revision

        Tag names must be UTF-8 strings.
        """
        raise NotImplementedError()

    def recode(self, s, encoding=None):
        """Return s as a UTF-8 byte string.

        Unicode input is encoded directly; byte strings are decoded with
        'encoding' (default: self.encoding), falling back first to
        latin-1 (which accepts any byte) and finally to replacement
        characters, then re-encoded as UTF-8.
        """
        if not encoding:
            encoding = self.encoding or 'utf-8'

        if isinstance(s, unicode):
            return s.encode("utf-8")
        try:
            return s.decode(encoding).encode("utf-8")
        except (UnicodeError, LookupError):
            # decoding failed, or the codec name is unknown; narrowed
            # from a bare except: that also swallowed KeyboardInterrupt
            try:
                return s.decode("latin-1").encode("utf-8")
            except (UnicodeError, LookupError):
                return s.decode(encoding, "replace").encode("utf-8")

    def getchangedfiles(self, rev, i):
        """Return the files changed by rev compared to parent[i].

        i is an index selecting one of the parents of rev.  The return
        value should be the list of files that are different in rev and
        this parent.

        If rev has no parents, i is None.

        This function is only needed to support --filemap
        """
        raise NotImplementedError()

    def converted(self, rev, sinkrev):
        '''Notify the source that a revision has been converted.'''
        pass

    def hasnativeorder(self):
        """Return true if this source has a meaningful, native revision
        order. For instance, Mercurial revisions are stored sequentially
        while there is no such global ordering with Darcs.
        """
        return False

    def lookuprev(self, rev):
        """If rev is a meaningful revision reference in source, return
        the referenced identifier in the same format used by getcommit().
        return None otherwise.
        """
        return None
+
class converter_sink(object):
    """Conversion sink (target) interface"""

    def __init__(self, ui, path):
        """Initialize the conversion sink, or raise NoRepo("message")
        when path does not point at a valid repository.

        self.created collects paths that must be removed should a fatal
        error occur later in the conversion."""
        self.ui = ui
        self.path = path
        self.created = []

    def getheads(self):
        """Return a list of this repository's heads"""
        raise NotImplementedError()

    def revmapfile(self):
        """Path of the file recording one 'source_rev_id sink_rev_id'
        line per converted revision, pairing equivalent identifiers of
        the two systems."""
        raise NotImplementedError()

    def authorfile(self):
        """Path of the file recording 'srcauthor=dstauthor' lines that
        pair equivalent author identifiers, or None when the sink keeps
        no such file."""
        return None

    def putcommit(self, files, copies, parents, commit, source, revmap):
        """Create a revision with all changed files listed in 'files'
        and having listed parents. 'commit' is a commit object
        containing at a minimum the author, date, and message for this
        changeset.  'files' is a list of (path, version) tuples,
        'copies' is a dictionary mapping destinations to sources,
        'source' is the source repository, and 'revmap' is a mapfile
        of source revisions to converted revisions. Only getfile(),
        getmode(), and lookuprev() should be called on 'source'.

        Note that the sink repository is not told to update itself to
        a particular revision (or even what that revision would be)
        before it receives the file data.
        """
        raise NotImplementedError()

    def puttags(self, tags):
        """Store the given tags in the sink.

        tags: {tagname: sink_rev_id, ...} where tagname is an UTF-8
        string.  Return (tag_revision, tag_parent_revision), or
        (None, None) when nothing changed.
        """
        raise NotImplementedError()

    def setbranch(self, branch, pbranches):
        """Select the branch that subsequent putcommit() calls belong
        to; invoked once before the first commit of each branch.
        branch: branch name for subsequent commits
        pbranches: (converted parent revision, parent branch) tuples"""
        pass

    def setfilemapmode(self, active):
        """Inform the destination that a filemap is in use.

        Some converter_sources (svn in particular) may claim a file
        changed in a revision even when it did not; with a filemap
        active the destination should filter out the resulting empty
        revisions.
        """
        pass

    def before(self):
        """Hook called before the conversion starts."""
        pass

    def after(self):
        """Hook called after the conversion finishes."""
        pass
+
+
class commandline(object):
    """Helper for driving an external version-control command-line tool.

    Builds shell command lines, executes them through util.popen and
    provides output- and exit-status-checking helpers to subclasses.
    """
    def __init__(self, ui, command):
        self.ui = ui
        self.command = command  # name of the external executable

    def prerun(self):
        """Hook invoked immediately before each command execution."""
        pass

    def postrun(self):
        """Hook invoked immediately after each command execution."""
        pass

    def _cmdline(self, cmd, *args, **kwargs):
        """Return the full shell command string for 'command cmd args'.

        Keyword arguments become options: single-letter keys turn into
        '-k value' pairs, longer keys into '--key=value' (underscores
        mapped to dashes).  A non-string value (True/None) marks a
        flag-only option; the TypeError from the string concatenation
        below is how that case is detected and the value skipped.
        Unless ui.debugflag is set, stderr is discarded; stdin always
        comes from the null device.
        """
        cmdline = [self.command, cmd] + list(args)
        for k, v in kwargs.iteritems():
            if len(k) == 1:
                cmdline.append('-' + k)
            else:
                cmdline.append('--' + k.replace('_', '-'))
            try:
                if len(k) == 1:
                    cmdline.append('' + v)
                else:
                    cmdline[-1] += '=' + v
            except TypeError:
                # v is not a string: boolean flag, no value to append
                pass
        cmdline = [util.shellquote(arg) for arg in cmdline]
        if not self.ui.debugflag:
            cmdline += ['2>', util.nulldev]
        cmdline += ['<', util.nulldev]
        cmdline = ' '.join(cmdline)
        return cmdline

    def _run(self, cmd, *args, **kwargs):
        """Run the command and return the pipe to its output."""
        cmdline = self._cmdline(cmd, *args, **kwargs)
        self.ui.debug('running: %s\n' % (cmdline,))
        self.prerun()
        try:
            return util.popen(cmdline)
        finally:
            self.postrun()

    def run(self, cmd, *args, **kwargs):
        """Run the command; return (output string, exit status)."""
        fp = self._run(cmd, *args, **kwargs)
        output = fp.read()
        self.ui.debug(output)
        return output, fp.close()

    def runlines(self, cmd, *args, **kwargs):
        """Run the command; return (list of output lines, exit status)."""
        fp = self._run(cmd, *args, **kwargs)
        output = fp.readlines()
        self.ui.debug(''.join(output))
        return output, fp.close()

    def checkexit(self, status, output=''):
        """Raise util.Abort (showing the tool's output) on failure."""
        if status:
            if output:
                self.ui.warn(_('%s error:\n') % self.command)
                self.ui.warn(output)
            msg = util.explain_exit(status)[0]
            raise util.Abort('%s %s' % (self.command, msg))

    def run0(self, cmd, *args, **kwargs):
        """Like run(), but abort on nonzero exit; return output only."""
        output, status = self.run(cmd, *args, **kwargs)
        self.checkexit(status, output)
        return output

    def runlines0(self, cmd, *args, **kwargs):
        """Like runlines(), but abort on nonzero exit; return the lines."""
        output, status = self.runlines(cmd, *args, **kwargs)
        self.checkexit(status, ''.join(output))
        return output

    def getargmax(self):
        """Return (and cache) a conservative command-line length limit."""
        if '_argmax' in self.__dict__:
            return self._argmax

        # POSIX requires at least 4096 bytes for ARG_MAX
        self._argmax = 4096
        try:
            self._argmax = os.sysconf("SC_ARG_MAX")
        except (AttributeError, ValueError, OSError):
            # sysconf is missing (Windows) or SC_ARG_MAX is unsupported;
            # keep the POSIX minimum.  (Narrowed from a bare except: that
            # also swallowed KeyboardInterrupt/SystemExit.)
            pass

        # Windows shells impose their own limits on command line length,
        # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
        # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
        # details about cmd.exe limitations.

        # Since ARG_MAX is for command line _and_ environment, lower our limit
        # (and make happy Windows shells while doing this).

        # floor division keeps the limit an integer
        self._argmax = self._argmax // 2 - 1
        return self._argmax

    def limit_arglist(self, arglist, cmd, *args, **kwargs):
        """Yield chunks of arglist that, appended to the command line
        built from cmd/args/kwargs, stay within getargmax()."""
        limit = self.getargmax() - len(self._cmdline(cmd, *args, **kwargs))
        bytes = 0
        fl = []
        for fn in arglist:
            b = len(fn) + 3
            # always emit at least one argument per chunk, even if it
            # alone exceeds the limit
            if bytes + b < limit or len(fl) == 0:
                fl.append(fn)
                bytes += b
            else:
                yield fl
                fl = [fn]
                bytes = b
        if fl:
            yield fl

    def xargs(self, arglist, cmd, *args, **kwargs):
        """Run the command repeatedly, xargs-style, over arglist."""
        for l in self.limit_arglist(arglist, cmd, *args, **kwargs):
            self.run0(cmd, *(list(args) + l), **kwargs)
+
+class mapfile(dict):
+    def __init__(self, ui, path):
+        super(mapfile, self).__init__()
+        self.ui = ui
+        self.path = path
+        self.fp = None
+        self.order = []
+        self._read()
+
+    def _read(self):
+        if not self.path:
+            return
+        try:
+            fp = open(self.path, 'r')
+        except IOError, err:
+            if err.errno != errno.ENOENT:
+                raise
+            return
+        for i, line in enumerate(fp):
+            try:
+                key, value = line.splitlines()[0].rsplit(' ', 1)
+            except ValueError:
+                raise util.Abort(
+                    _('syntax error in %s(%d): key/value pair expected')
+                    % (self.path, i + 1))
+            if key not in self:
+                self.order.append(key)
+            super(mapfile, self).__setitem__(key, value)
+        fp.close()
+
+    def __setitem__(self, key, value):
+        if self.fp is None:
+            try:
+                self.fp = open(self.path, 'a')
+            except IOError, err:
+                raise util.Abort(_('could not open map file %r: %s') %
+                                 (self.path, err.strerror))
+        self.fp.write('%s %s\n' % (key, value))
+        self.fp.flush()
+        super(mapfile, self).__setitem__(key, value)
+
+    def close(self):
+        if self.fp:
+            self.fp.close()
+            self.fp = None
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/convcmd.py b/plugins/hg4idea/testData/bin/hgext/convert/convcmd.py
new file mode 100644 (file)
index 0000000..0c0eb52
--- /dev/null
@@ -0,0 +1,404 @@
+# convcmd - convert extension commands definition
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from common import NoRepo, MissingTool, SKIPREV, mapfile
+from cvs import convert_cvs
+from darcs import darcs_source
+from git import convert_git
+from hg import mercurial_source, mercurial_sink
+from subversion import svn_source, svn_sink
+from monotone import monotone_source
+from gnuarch import gnuarch_source
+from bzr import bzr_source
+from p4 import p4_source
+import filemap
+
+import os, shutil
+from mercurial import hg, util, encoding
+from mercurial.i18n import _
+
# Encoding of the environment before convert() switches encoding.encoding
# to UTF-8; recode() uses it to render messages for the user's terminal.
orig_encoding = 'ascii'
+
def recode(s):
    """Re-encode internal UTF-8/unicode text into the original terminal
    encoding, replacing characters it cannot represent."""
    if not isinstance(s, unicode):
        s = s.decode('utf-8')
    return s.encode(orig_encoding, 'replace')
+
# Registered conversion sources: (type name, converter class, default
# sort mode).  convertsource() probes them in this order until one
# accepts the given path.
source_converters = [
    ('cvs', convert_cvs, 'branchsort'),
    ('git', convert_git, 'branchsort'),
    ('svn', svn_source, 'branchsort'),
    ('hg', mercurial_source, 'sourcesort'),
    ('darcs', darcs_source, 'branchsort'),
    ('mtn', monotone_source, 'branchsort'),
    ('gnuarch', gnuarch_source, 'branchsort'),
    ('bzr', bzr_source, 'branchsort'),
    ('p4', p4_source, 'branchsort'),
    ]

# Registered conversion sinks: (type name, sink class).
sink_converters = [
    ('hg', mercurial_sink),
    ('svn', svn_sink),
    ]
+
+def convertsource(ui, path, type, rev):
+    exceptions = []
+    if type and type not in [s[0] for s in source_converters]:
+        raise util.Abort(_('%s: invalid source repository type') % type)
+    for name, source, sortmode in source_converters:
+        try:
+            if not type or name == type:
+                return source(ui, path, rev), sortmode
+        except (NoRepo, MissingTool), inst:
+            exceptions.append(inst)
+    if not ui.quiet:
+        for inst in exceptions:
+            ui.write("%s\n" % inst)
+    raise util.Abort(_('%s: missing or unsupported repository') % path)
+
+def convertsink(ui, path, type):
+    if type and type not in [s[0] for s in sink_converters]:
+        raise util.Abort(_('%s: invalid destination repository type') % type)
+    for name, sink in sink_converters:
+        try:
+            if not type or name == type:
+                return sink(ui, path)
+        except NoRepo, inst:
+            ui.note(_("convert: %s\n") % inst)
+    raise util.Abort(_('%s: unknown repository type') % path)
+
class converter(object):
    """Drives a whole conversion: walks the source history, orders it
    topologically and replays every changeset into the sink, keeping a
    persistent source->sink revision map so runs are incremental.
    """
    def __init__(self, ui, source, dest, revmapfile, opts):
        # source/dest are converter_source/converter_sink instances;
        # revmapfile is where the source->sink revision map is persisted.

        self.source = source
        self.dest = dest
        self.ui = ui
        self.opts = opts
        self.commitcache = {}  # source rev id -> commit object
        self.authors = {}      # source author -> destination author
        self.authorfile = None

        # Record converted revisions persistently: maps source revision
        # ID to target revision ID (both strings).  (This is how
        # incremental conversions work.)
        self.map = mapfile(ui, revmapfile)

        # Read first the dst author map if any
        authorfile = self.dest.authorfile()
        if authorfile and os.path.exists(authorfile):
            self.readauthormap(authorfile)
        # Extend/Override with new author map if necessary
        if opts.get('authors'):
            self.readauthormap(opts.get('authors'))
            self.authorfile = self.dest.authorfile()

        self.splicemap = mapfile(ui, opts.get('splicemap'))
        self.branchmap = mapfile(ui, opts.get('branchmap'))

    def walktree(self, heads):
        '''Return a mapping that identifies the uncommitted parents of every
        uncommitted changeset.'''
        visit = heads
        known = set()
        parents = {}
        while visit:
            n = visit.pop(0)
            # already processed, or already converted in an earlier run
            if n in known or n in self.map:
                continue
            known.add(n)
            commit = self.cachecommit(n)
            parents[n] = []
            for p in commit.parents:
                parents[n].append(p)
                visit.append(p)

        return parents

    def toposort(self, parents, sortmode):
        '''Return an ordering such that every uncommitted changeset is
        preceded by all its uncommitted ancestors.'''

        def mapchildren(parents):
            """Return a (children, roots) tuple where 'children' maps parent
            revision identifiers to children ones, and 'roots' is the list of
            revisions without parents. 'parents' must be a mapping of revision
            identifier to its parents ones.
            """
            visit = parents.keys()
            seen = set()
            children = {}
            roots = []

            while visit:
                n = visit.pop(0)
                if n in seen:
                    continue
                seen.add(n)
                # Ensure that nodes without parents are present in the
                # 'children' mapping.
                children.setdefault(n, [])
                hasparent = False
                for p in parents[n]:
                    if not p in self.map:
                        visit.append(p)
                        hasparent = True
                    children.setdefault(p, []).append(n)
                if not hasparent:
                    roots.append(n)

            return children, roots

        # Sort functions are supposed to take a list of revisions which
        # can be converted immediately and pick one

        def makebranchsorter():
            """If the previously converted revision has a child in the
            eligible revisions list, pick it. Return the list head
            otherwise. Branch sort attempts to minimize branch
            switching, which is harmful for Mercurial backend
            compression.
            """
            prev = [None]
            def picknext(nodes):
                next = nodes[0]
                for n in nodes:
                    if prev[0] in parents[n]:
                        next = n
                        break
                prev[0] = next
                return next
            return picknext

        def makesourcesorter():
            """Source specific sort."""
            keyfn = lambda n: self.commitcache[n].sortkey
            def picknext(nodes):
                return sorted(nodes, key=keyfn)[0]
            return picknext

        def makedatesorter():
            """Sort revisions by date."""
            dates = {}
            def getdate(n):
                if n not in dates:
                    dates[n] = util.parsedate(self.commitcache[n].date)
                return dates[n]

            def picknext(nodes):
                return min([(getdate(n), n) for n in nodes])[1]

            return picknext

        if sortmode == 'branchsort':
            picknext = makebranchsorter()
        elif sortmode == 'datesort':
            picknext = makedatesorter()
        elif sortmode == 'sourcesort':
            picknext = makesourcesorter()
        else:
            raise util.Abort(_('unknown sort mode: %s') % sortmode)

        children, actives = mapchildren(parents)

        s = []
        pendings = {}
        while actives:
            n = picknext(actives)
            actives.remove(n)
            s.append(n)

            # Update dependents list
            for c in children.get(n, []):
                if c not in pendings:
                    pendings[c] = [p for p in parents[c] if p not in self.map]
                try:
                    pendings[c].remove(n)
                except ValueError:
                    raise util.Abort(_('cycle detected between %s and %s')
                                       % (recode(c), recode(n)))
                if not pendings[c]:
                    # Parents are converted, node is eligible
                    actives.insert(0, c)
                    pendings[c] = None

        if len(s) != len(parents):
            raise util.Abort(_("not all revisions were sorted"))

        return s

    def writeauthormap(self):
        """Write the in-memory author map back to self.authorfile."""
        authorfile = self.authorfile
        if authorfile:
            self.ui.status(_('Writing author map file %s\n') % authorfile)
            ofile = open(authorfile, 'w+')
            for author in self.authors:
                ofile.write("%s=%s\n" % (author, self.authors[author]))
            ofile.close()

    def readauthormap(self, authorfile):
        """Load 'srcauthor=dstauthor' lines from authorfile into
        self.authors; a later mapping overrides an earlier one."""
        afile = open(authorfile, 'r')
        for line in afile:

            line = line.strip()
            # skip blank lines and comments
            if not line or line.startswith('#'):
                continue

            try:
                srcauthor, dstauthor = line.split('=', 1)
            except ValueError:
                msg = _('Ignoring bad line in author map file %s: %s\n')
                self.ui.warn(msg % (authorfile, line.rstrip()))
                continue

            srcauthor = srcauthor.strip()
            dstauthor = dstauthor.strip()
            if self.authors.get(srcauthor) in (None, dstauthor):
                msg = _('mapping author %s to %s\n')
                self.ui.debug(msg % (srcauthor, dstauthor))
                self.authors[srcauthor] = dstauthor
                continue

            m = _('overriding mapping for author %s, was %s, will be %s\n')
            self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))

        afile.close()

    def cachecommit(self, rev):
        """Fetch the commit object for rev, apply the author and branch
        maps, cache it and return it."""
        commit = self.source.getcommit(rev)
        commit.author = self.authors.get(commit.author, commit.author)
        commit.branch = self.branchmap.get(commit.branch, commit.branch)
        self.commitcache[rev] = commit
        return commit

    def copy(self, rev):
        """Replay source revision rev into the sink and record the
        resulting sink revision in self.map."""
        commit = self.commitcache[rev]

        changes = self.source.getchanges(rev)
        if isinstance(changes, basestring):
            # a string result means rev maps onto an existing revision
            # (or is skipped) instead of carrying a change list
            if changes == SKIPREV:
                dest = SKIPREV
            else:
                dest = self.map[changes]
            self.map[rev] = dest
            return
        files, copies = changes
        pbranches = []
        if commit.parents:
            for prev in commit.parents:
                if prev not in self.commitcache:
                    self.cachecommit(prev)
                pbranches.append((self.map[prev],
                                  self.commitcache[prev].branch))
        self.dest.setbranch(commit.branch, pbranches)
        try:
            # the splicemap may override this revision's parents
            parents = self.splicemap[rev].replace(',', ' ').split()
            self.ui.status(_('spliced in %s as parents of %s\n') %
                           (parents, rev))
            parents = [self.map.get(p, p) for p in parents]
        except KeyError:
            parents = [b[0] for b in pbranches]
        newnode = self.dest.putcommit(files, copies, parents, commit,
                                      self.source, self.map)
        self.source.converted(rev, newnode)
        self.map[rev] = newnode

    def convert(self, sortmode):
        """Run the whole conversion: scan the source, sort revisions,
        replay each changeset, convert tags and write the author map."""
        try:
            self.source.before()
            self.dest.before()
            self.source.setrevmap(self.map)
            self.ui.status(_("scanning source...\n"))
            heads = self.source.getheads()
            parents = self.walktree(heads)
            self.ui.status(_("sorting...\n"))
            t = self.toposort(parents, sortmode)
            num = len(t)
            c = None

            self.ui.status(_("converting...\n"))
            for c in t:
                num -= 1
                desc = self.commitcache[c].desc
                if "\n" in desc:
                    desc = desc.splitlines()[0]
                # convert the log message to the local encoding ourselves
                # rather than with tolocal(), because convert() has forced
                # encoding.encoding to 'utf-8'
                self.ui.status("%d %s\n" % (num, recode(desc)))
                self.ui.note(_("source: %s\n") % recode(c))
                self.copy(c)

            tags = self.source.gettags()
            ctags = {}
            for k in tags:
                v = tags[k]
                # drop tags that point at skipped revisions
                if self.map.get(v, SKIPREV) != SKIPREV:
                    ctags[k] = self.map[v]

            if c and ctags:
                nrev, tagsparent = self.dest.puttags(ctags)
                if nrev and tagsparent:
                    # write another hash correspondence to override the previous
                    # one so we don't end up with extra tag heads
                    tagsparents = [e for e in self.map.iteritems()
                                   if e[1] == tagsparent]
                    if tagsparents:
                        self.map[tagsparents[0][0]] = nrev

            self.writeauthormap()
        finally:
            self.cleanup()

    def cleanup(self):
        """Run source/sink after() hooks and close the revision map."""
        try:
            self.dest.after()
        finally:
            self.source.after()
        self.map.close()
+
def convert(ui, src, dest=None, revmapfile=None, **opts):
    """Entry point of the conversion: build source and sink for 'src'
    and 'dest', pick a sort mode and run the converter.

    When dest is None, '<src>-hg' is assumed.  revmapfile defaults to
    whatever the sink reports via revmapfile().
    """
    global orig_encoding
    orig_encoding = encoding.encoding
    encoding.encoding = 'UTF-8'

    if not dest:
        dest = hg.defaultdest(src) + "-hg"
        ui.status(_("assuming destination %s\n") % dest)

    destc = convertsink(ui, dest, opts.get('dest_type'))

    try:
        srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
                                          opts.get('rev'))
    except Exception:
        # remove anything the sink created before failing
        for path in destc.created:
            shutil.rmtree(path, True)
        raise

    sortmodes = ('branchsort', 'datesort', 'sourcesort')
    sortmode = [m for m in sortmodes if opts.get(m)]
    if len(sortmode) > 1:
        raise util.Abort(_('more than one sort mode specified'))
    sortmode = sortmode[0] if sortmode else defaultsort
    if sortmode == 'sourcesort' and not srcc.hasnativeorder():
        raise util.Abort(_('--sourcesort is not supported by this data source'))

    fmap = opts.get('filemap')
    if fmap:
        srcc = filemap.filemap_source(ui, srcc, fmap)
        destc.setfilemapmode(True)

    if not revmapfile:
        try:
            revmapfile = destc.revmapfile()
        except Exception:
            # fall back to a map file inside the destination.  Note: join
            # with the destination *path*; the old code joined with the
            # sink object itself, which would raise TypeError.
            revmapfile = os.path.join(dest, "map")

    c = converter(ui, srcc, destc, revmapfile, opts)
    c.convert(sortmode)
+
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/cvs.py b/plugins/hg4idea/testData/bin/hgext/convert/cvs.py
new file mode 100644 (file)
index 0000000..fc40b4b
--- /dev/null
@@ -0,0 +1,282 @@
+# cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
+#
+#  Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, locale, re, socket, errno
+from cStringIO import StringIO
+from mercurial import util
+from mercurial.i18n import _
+
+from common import NoRepo, commit, converter_source, checktool
+import cvsps
+
+class convert_cvs(converter_source):
+    # Conversion source reading history from a CVS checkout by speaking
+    # the CVS client/server protocol directly (pserver, :local:, :ext:/rsh
+    # or a plain local path), with changeset grouping delegated to the
+    # bundled cvsps module.
+    def __init__(self, ui, path, rev=None):
+        super(convert_cvs, self).__init__(ui, path, rev=rev)
+
+        cvs = os.path.join(path, "CVS")
+        if not os.path.exists(cvs):
+            raise NoRepo(_("%s does not look like a CVS checkout") % path)
+
+        checktool('cvs')
+
+        self.changeset = None   # rev-id -> commit; filled lazily by _parse()
+        self.files = {}         # rev-id -> {filename: CVS revision string}
+        self.tags = {}          # tag name -> rev-id
+        self.lastbranch = {}    # branch name -> id of its latest changeset
+        self.socket = None
+        # CVS/Root and CVS/Repository end with a newline -- strip it.
+        self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
+        self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
+        self.encoding = locale.getpreferredencoding()
+
+        self._connect()
+
+    def _parse(self):
+        # Populate self.changeset/self.files/self.tags/self.heads from the
+        # cvsps changeset log.  Idempotent: subsequent calls are no-ops.
+        if self.changeset is not None:
+            return
+        self.changeset = {}
+
+        maxrev = 0
+        if self.rev:
+            # TODO: handle tags
+            try:
+                # patchset number?
+                maxrev = int(self.rev)
+            except ValueError:
+                raise util.Abort(_('revision %s is not a patchset number')
+                                 % self.rev)
+
+        d = os.getcwd()
+        try:
+            os.chdir(self.path)
+            id = None
+            state = 0
+            filerevids = {}
+
+            # The on-disk cvsps cache can be disabled with
+            # convert.cvsps.cache=false (default: enabled, 'update' mode).
+            cache = 'update'
+            if not self.ui.configbool('convert', 'cvsps.cache', True):
+                cache = None
+            db = cvsps.createlog(self.ui, cache=cache)
+            db = cvsps.createchangeset(self.ui, db,
+                fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
+                mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
+                mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
+
+            for cs in db:
+                # When a maximum patchset number was requested, stop there.
+                if maxrev and cs.id > maxrev:
+                    break
+                id = str(cs.id)
+                cs.author = self.recode(cs.author)
+                self.lastbranch[cs.branch] = id
+                cs.comment = self.recode(cs.comment)
+                date = util.datestr(cs.date)
+                self.tags.update(dict.fromkeys(cs.tags, id))
+
+                # Map filename -> dotted revision, with "(DEAD)" appended
+                # for dead (removed) file revisions.
+                files = {}
+                for f in cs.entries:
+                    files[f.file] = "%s%s" % ('.'.join([str(x)
+                                                        for x in f.revision]),
+                                              ['', '(DEAD)'][f.dead])
+
+                # add current commit to set
+                c = commit(author=cs.author, date=date,
+                           parents=[str(p.id) for p in cs.parents],
+                           desc=cs.comment, branch=cs.branch or '')
+                self.changeset[id] = c
+                self.files[id] = files
+
+            # Heads are the last changeset seen on each branch.
+            self.heads = self.lastbranch.values()
+        finally:
+            os.chdir(d)
+
+    def _connect(self):
+        # Open a connection to the CVS server named in CVS/Root and perform
+        # the initial protocol handshake, leaving self.writep/self.readp
+        # as file objects for the request/response streams.
+        root = self.cvsroot
+        conntype = None
+        user, host = None, None
+        cmd = ['cvs', 'server']
+
+        self.ui.status(_("connecting to %s\n") % root)
+
+        if root.startswith(":pserver:"):
+            root = root[9:]
+            # [user[:password]@]host[:port]/path
+            m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
+                         root)
+            if m:
+                conntype = "pserver"
+                user, passw, serv, port, root = m.groups()
+                if not user:
+                    user = "anonymous"
+                if not port:
+                    port = 2401
+                else:
+                    port = int(port)
+                # Two possible key formats in ~/.cvspass: without and with
+                # an explicit port number.
+                format0 = ":pserver:%s@%s:%s" % (user, serv, root)
+                format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
+
+                if not passw:
+                    # No password in the root: look up the scrambled
+                    # password in ~/.cvspass, defaulting to "A".
+                    passw = "A"
+                    cvspass = os.path.expanduser("~/.cvspass")
+                    try:
+                        pf = open(cvspass)
+                        for line in pf.read().splitlines():
+                            part1, part2 = line.split(' ', 1)
+                            if part1 == '/1':
+                                # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
+                                part1, part2 = part2.split(' ', 1)
+                                format = format1
+                            else:
+                                # :pserver:user@example.com:/cvsroot/foo Ah<Z
+                                format = format0
+                            if part1 == format:
+                                passw = part2
+                                break
+                        pf.close()
+                    except IOError, inst:
+                        # A missing ~/.cvspass is fine; anything else is
+                        # re-raised with the filename attached.
+                        if inst.errno != errno.ENOENT:
+                            if not getattr(inst, 'filename', None):
+                                inst.filename = cvspass
+                            raise
+
+                sck = socket.socket()
+                sck.connect((serv, port))
+                sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
+                                    "END AUTH REQUEST", ""]))
+                if sck.recv(128) != "I LOVE YOU\n":
+                    raise util.Abort(_("CVS pserver authentication failed"))
+
+                self.writep = self.readp = sck.makefile('r+')
+
+        if not conntype and root.startswith(":local:"):
+            conntype = "local"
+            root = root[7:]
+
+        if not conntype:
+            # :ext:user@host/home/user/path/to/cvsroot
+            if root.startswith(":ext:"):
+                root = root[5:]
+            m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
+            # Do not take a Windows path "c:\foo\bar" for a connection string
+            if os.path.isdir(root) or not m:
+                conntype = "local"
+            else:
+                conntype = "rsh"
+                user, host, root = m.group(1), m.group(2), m.group(3)
+
+        if conntype != "pserver":
+            # Local and rsh access both spawn "cvs server" as a subprocess.
+            if conntype == "rsh":
+                rsh = os.environ.get("CVS_RSH") or "ssh"
+                if user:
+                    cmd = [rsh, '-l', user, host] + cmd
+                else:
+                    cmd = [rsh, host] + cmd
+
+            # popen2 does not support argument lists under Windows
+            cmd = [util.shellquote(arg) for arg in cmd]
+            cmd = util.quotecommand(' '.join(cmd))
+            self.writep, self.readp = util.popen2(cmd)
+
+        self.realroot = root
+
+        # Protocol handshake: announce the root and the responses we
+        # understand, then verify the server's valid-requests answer.
+        self.writep.write("Root %s\n" % root)
+        self.writep.write("Valid-responses ok error Valid-requests Mode"
+                          " M Mbinary E Checked-in Created Updated"
+                          " Merged Removed\n")
+        self.writep.write("valid-requests\n")
+        self.writep.flush()
+        r = self.readp.readline()
+        if not r.startswith("Valid-requests"):
+            raise util.Abort(_('unexpected response from CVS server '
+                               '(expected "Valid-requests", but got %r)')
+                             % r)
+        if "UseUnchanged" in r:
+            self.writep.write("UseUnchanged\n")
+            self.writep.flush()
+            r = self.readp.readline()
+
+    def getheads(self):
+        self._parse()
+        return self.heads
+
+    def _getfile(self, name, rev):
+        # Check out one file revision over the protocol connection,
+        # returning (data, mode) where mode is "x" or "".
+
+        def chunkedread(fp, count):
+            # file-objects returned by socket.makefile() do not handle
+            # large read() requests very well.
+            chunksize = 65536
+            output = StringIO()
+            while count > 0:
+                data = fp.read(min(count, chunksize))
+                if not data:
+                    raise util.Abort(_("%d bytes missing from remote file")
+                                     % count)
+                count -= len(data)
+                output.write(data)
+            return output.getvalue()
+
+        # Dead revisions have no content; signal "file missing" to callers.
+        if rev.endswith("(DEAD)"):
+            raise IOError
+
+        args = ("-N -P -kk -r %s --" % rev).split()
+        args.append(self.cvsrepo + '/' + name)
+        for x in args:
+            self.writep.write("Argument %s\n" % x)
+        self.writep.write("Directory .\n%s\nco\n" % self.realroot)
+        self.writep.flush()
+
+        data = ""
+        mode = None
+        while 1:
+            line = self.readp.readline()
+            if line.startswith("Created ") or line.startswith("Updated "):
+                self.readp.readline() # path
+                self.readp.readline() # entries
+                mode = self.readp.readline()[:-1]
+                count = int(self.readp.readline()[:-1])
+                data = chunkedread(self.readp, count)
+            elif line.startswith(" "):
+                data += line[1:]
+            elif line.startswith("M "):
+                pass
+            elif line.startswith("Mbinary "):
+                count = int(self.readp.readline()[:-1])
+                data = chunkedread(self.readp, count)
+            else:
+                if line == "ok\n":
+                    if mode is None:
+                        raise util.Abort(_('malformed response from CVS'))
+                    return (data, "x" in mode and "x" or "")
+                elif line.startswith("E "):
+                    self.ui.warn(_("cvs server: %s\n") % line[2:])
+                elif line.startswith("Remove"):
+                    self.readp.readline()
+                else:
+                    raise util.Abort(_("unknown CVS response: %s") % line)
+
+    def getfile(self, file, rev):
+        # Fetches the file and remembers its mode for a later getmode().
+        self._parse()
+        data, mode = self._getfile(file, rev)
+        self.modecache[(file, rev)] = mode
+        return data
+
+    def getmode(self, file, rev):
+        # Only valid after getfile() populated the cache for (file, rev).
+        return self.modecache[(file, rev)]
+
+    def getchanges(self, rev):
+        # Resets the mode cache for the new revision; copies are not
+        # detected for CVS, hence the empty dict.
+        self._parse()
+        self.modecache = {}
+        return sorted(self.files[rev].iteritems()), {}
+
+    def getcommit(self, rev):
+        self._parse()
+        return self.changeset[rev]
+
+    def gettags(self):
+        self._parse()
+        return self.tags
+
+    def getchangedfiles(self, rev, i):
+        self._parse()
+        return sorted(self.files[rev])
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/cvsps.py b/plugins/hg4idea/testData/bin/hgext/convert/cvsps.py
new file mode 100644 (file)
index 0000000..38dbcff
--- /dev/null
@@ -0,0 +1,847 @@
+# Mercurial built-in replacement for cvsps.
+#
+# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+import re
+import cPickle as pickle
+from mercurial import util
+from mercurial.i18n import _
+from mercurial import hook
+
+class logentry(object):
+    '''Class logentry has the following attributes:
+        .author    - author name as CVS knows it
+        .branch    - name of branch this revision is on
+        .branches  - revision tuple of branches starting at this revision
+        .comment   - commit message
+        .date      - the commit date as a (time, tz) tuple
+        .dead      - true if file revision is dead
+        .file      - Name of file
+        .lines     - a tuple (+lines, -lines) or None
+        .parent    - Previous revision of this entry
+        .rcs       - name of file as returned from CVS
+        .revision  - revision number as tuple
+        .tags      - list of tags on the file
+        .synthetic - is this a synthetic "file ... added on ..." revision?
+        .mergepoint- the branch that has been merged from
+                     (if present in rlog output)
+        .branchpoints- the branches that start at the current entry
+    '''
+    def __init__(self, **entries):
+        # All attributes listed in the docstring arrive as keyword
+        # arguments; only .synthetic has a default.
+        self.synthetic = False
+        self.__dict__.update(entries)
+
+    def __repr__(self):
+        return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
+                                        id(self),
+                                        self.file,
+                                        ".".join(map(str, self.revision)))
+
+class logerror(Exception):
+    # Raised for fatal problems while collecting the CVS log, e.g. the
+    # working directory is not a CVS sandbox, or the log cache overlaps
+    # with newly fetched entries.
+    pass
+
+def getrepopath(cvspath):
+    """Return the repository path from a CVS path.
+
+    >>> getrepopath('/foo/bar')
+    '/foo/bar'
+    >>> getrepopath('c:/foo/bar')
+    'c:/foo/bar'
+    >>> getrepopath(':pserver:10/foo/bar')
+    '/foo/bar'
+    >>> getrepopath(':pserver:10c:/foo/bar')
+    '/foo/bar'
+    >>> getrepopath(':pserver:/foo/bar')
+    '/foo/bar'
+    >>> getrepopath(':pserver:c:/foo/bar')
+    'c:/foo/bar'
+    >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
+    '/foo/bar'
+    >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
+    'c:/foo/bar'
+    """
+    # According to CVS manual, CVS paths are expressed like:
+    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
+    #
+    # Unfortunately, Windows absolute paths start with a drive letter
+    # like 'c:' making it harder to parse. Here we assume that drive
+    # letters are only one character long and any CVS component before
+    # the repository path is at least 2 characters long, and use this
+    # to disambiguate.
+    parts = cvspath.split(':')
+    if len(parts) == 1:
+        return parts[0]
+    # Here there is an ambiguous case if we have a port number
+    # immediately followed by a Windows drive letter. We assume this
+    # never happens and decide it must be a CVS path component,
+    # therefore ignoring it.
+    if len(parts[-2]) > 1:
+        # Second-to-last part is a host (or port suffix): repository path
+        # is the last part, minus any leading port digits.
+        return parts[-1].lstrip('0123456789')
+    # Single-character part: treat it as a Windows drive letter.
+    return parts[-2] + ':' + parts[-1]
+
+def createlog(ui, directory=None, root="", rlog=True, cache=None):
+    '''Collect the CVS (r)log and return it as a list of logentry objects.
+
+    With cache='update', a per-repository pickle under ~/.hg.cvsps is read
+    first and only entries newer than its last commit date are fetched
+    from the server; the merged log is written back to the cache.
+    '''
+
+    # Because we store many duplicate commit log messages, reusing strings
+    # saves a lot of memory and pickle storage space.
+    _scache = {}
+    def scache(s):
+        "return a shared version of a string"
+        return _scache.setdefault(s, s)
+
+    ui.status(_('collecting CVS rlog\n'))
+
+    log = []      # list of logentry objects containing the CVS state
+
+    # patterns to match in CVS (r)log output, by state of use
+    re_00 = re.compile('RCS file: (.+)$')
+    re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
+    re_02 = re.compile('cvs (r?log|server): (.+)\n$')
+    re_03 = re.compile("(Cannot access.+CVSROOT)|"
+                       "(can't create temporary directory.+)$")
+    re_10 = re.compile('Working file: (.+)$')
+    re_20 = re.compile('symbolic names:')
+    re_30 = re.compile('\t(.+): ([\\d.]+)$')
+    re_31 = re.compile('----------------------------$')
+    re_32 = re.compile('======================================='
+                       '======================================$')
+    re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
+    re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
+                       r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
+                       r'(.*mergepoint:\s+([^;]+);)?')
+    re_70 = re.compile('branches: (.+);$')
+
+    file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
+
+    prefix = ''   # leading path to strip off what we get from CVS
+
+    if directory is None:
+        # Current working directory
+
+        # Get the real directory in the repository
+        try:
+            prefix = open(os.path.join('CVS','Repository')).read().strip()
+            directory = prefix
+            if prefix == ".":
+                prefix = ""
+        except IOError:
+            raise logerror(_('not a CVS sandbox'))
+
+        if prefix and not prefix.endswith(os.sep):
+            prefix += os.sep
+
+        # Use the Root file in the sandbox, if it exists
+        try:
+            root = open(os.path.join('CVS','Root')).read().strip()
+        except IOError:
+            pass
+
+    if not root:
+        root = os.environ.get('CVSROOT', '')
+
+    # read log cache if one exists
+    oldlog = []
+    date = None
+
+    if cache:
+        cachedir = os.path.expanduser('~/.hg.cvsps')
+        if not os.path.exists(cachedir):
+            os.mkdir(cachedir)
+
+        # The cvsps cache pickle needs a uniquified name, based on the
+        # repository location. The address may have all sort of nasties
+        # in it, slashes, colons and such. So here we take just the
+        # alphanumerics, concatenated in a way that does not mix up the
+        # various components, so that
+        #    :pserver:user@server:/path
+        # and
+        #    /pserver/user/server/path
+        # are mapped to different cache file names.
+        cachefile = root.split(":") + [directory, "cache"]
+        cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
+        cachefile = os.path.join(cachedir,
+                                 '.'.join([s for s in cachefile if s]))
+
+    if cache == 'update':
+        # Any cache-read failure is non-fatal: we just fetch the full log.
+        try:
+            ui.note(_('reading cvs log cache %s\n') % cachefile)
+            oldlog = pickle.load(open(cachefile))
+            ui.note(_('cache has %d log entries\n') % len(oldlog))
+        except Exception, e:
+            ui.note(_('error reading cache: %r\n') % e)
+
+        if oldlog:
+            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
+            date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
+
+    # build the CVS commandline
+    cmd = ['cvs', '-q']
+    if root:
+        cmd.append('-d%s' % root)
+        p = util.normpath(getrepopath(root))
+        if not p.endswith('/'):
+            p += '/'
+        if prefix:
+            # looks like normpath replaces "" by "."
+            prefix = p + util.normpath(prefix)
+        else:
+            prefix = p
+    cmd.append(['log', 'rlog'][rlog])
+    if date:
+        # no space between option and date string
+        cmd.append('-d>%s' % date)
+    cmd.append(directory)
+
+    # state machine begins here
+    tags = {}     # dictionary of revisions on current file with their tags
+    branchmap = {} # mapping between branch names and revision numbers
+    state = 0
+    store = False # set when a new record can be appended
+
+    cmd = [util.shellquote(arg) for arg in cmd]
+    ui.note(_("running %s\n") % (' '.join(cmd)))
+    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
+
+    # One line of lookahead (peek) lets state 7 distinguish a revision
+    # separator from a separator-looking comment line.
+    pfp = util.popen(' '.join(cmd))
+    peek = pfp.readline()
+    while True:
+        line = peek
+        if line == '':
+            break
+        peek = pfp.readline()
+        if line.endswith('\n'):
+            line = line[:-1]
+        #ui.debug('state=%d line=%r\n' % (state, line))
+
+        if state == 0:
+            # initial state, consume input until we see 'RCS file'
+            match = re_00.match(line)
+            if match:
+                rcs = match.group(1)
+                tags = {}
+                if rlog:
+                    # Strip the ",v" suffix, the repository prefix, and
+                    # any Attic/ component from the RCS file name.
+                    filename = util.normpath(rcs[:-2])
+                    if filename.startswith(prefix):
+                        filename = filename[len(prefix):]
+                    if filename.startswith('/'):
+                        filename = filename[1:]
+                    if filename.startswith('Attic/'):
+                        filename = filename[6:]
+                    else:
+                        filename = filename.replace('/Attic/', '/')
+                    state = 2
+                    continue
+                state = 1
+                continue
+            match = re_01.match(line)
+            if match:
+                raise Exception(match.group(1))
+            match = re_02.match(line)
+            if match:
+                raise Exception(match.group(2))
+            if re_03.match(line):
+                raise Exception(line)
+
+        elif state == 1:
+            # expect 'Working file' (only when using log instead of rlog)
+            match = re_10.match(line)
+            assert match, _('RCS file must be followed by working file')
+            filename = util.normpath(match.group(1))
+            state = 2
+
+        elif state == 2:
+            # expect 'symbolic names'
+            if re_20.match(line):
+                branchmap = {}
+                state = 3
+
+        elif state == 3:
+            # read the symbolic names and store as tags
+            match = re_30.match(line)
+            if match:
+                rev = [int(x) for x in match.group(2).split('.')]
+
+                # Convert magic branch number to an odd-numbered one
+                revn = len(rev)
+                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
+                    rev = rev[:-2] + rev[-1:]
+                rev = tuple(rev)
+
+                if rev not in tags:
+                    tags[rev] = []
+                tags[rev].append(match.group(1))
+                branchmap[match.group(1)] = match.group(2)
+
+            elif re_31.match(line):
+                state = 5
+            elif re_32.match(line):
+                state = 0
+
+        elif state == 4:
+            # expecting '------' separator before first revision
+            if re_31.match(line):
+                state = 5
+            else:
+                assert not re_32.match(line), _('must have at least '
+                                                'some revisions')
+
+        elif state == 5:
+            # expecting revision number and possibly (ignored) lock indication
+            # we create the logentry here from values stored in states 0 to 4,
+            # as this state is re-entered for subsequent revisions of a file.
+            match = re_50.match(line)
+            assert match, _('expected revision number')
+            e = logentry(rcs=scache(rcs), file=scache(filename),
+                    revision=tuple([int(x) for x in match.group(1).split('.')]),
+                    branches=[], parent=None)
+            state = 6
+
+        elif state == 6:
+            # expecting date, author, state, lines changed
+            match = re_60.match(line)
+            assert match, _('revision must be followed by date line')
+            d = match.group(1)
+            if d[2] == '/':
+                # Y2K
+                d = '19' + d
+
+            if len(d.split()) != 3:
+                # cvs log dates always in GMT
+                d = d + ' UTC'
+            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
+                                        '%Y/%m/%d %H:%M:%S',
+                                        '%Y-%m-%d %H:%M:%S'])
+            e.author = scache(match.group(2))
+            e.dead = match.group(3).lower() == 'dead'
+
+            if match.group(5):
+                if match.group(6):
+                    e.lines = (int(match.group(5)), int(match.group(6)))
+                else:
+                    e.lines = (int(match.group(5)), 0)
+            elif match.group(6):
+                e.lines = (0, int(match.group(6)))
+            else:
+                e.lines = None
+
+            if match.group(7): # cvsnt mergepoint
+                myrev = match.group(8).split('.')
+                if len(myrev) == 2: # head
+                    e.mergepoint = 'HEAD'
+                else:
+                    # Reconstruct the magic branch number and map it back
+                    # to a branch name via branchmap.
+                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
+                    branches = [b for b in branchmap if branchmap[b] == myrev]
+                    assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
+                    e.mergepoint = branches[0]
+            else:
+                e.mergepoint = None
+            e.comment = []
+            state = 7
+
+        elif state == 7:
+            # read the revision numbers of branches that start at this revision
+            # or store the commit log message otherwise
+            m = re_70.match(line)
+            if m:
+                e.branches = [tuple([int(y) for y in x.strip().split('.')])
+                                for x in m.group(1).split(';')]
+                state = 8
+            elif re_31.match(line) and re_50.match(peek):
+                state = 5
+                store = True
+            elif re_32.match(line):
+                state = 0
+                store = True
+            else:
+                e.comment.append(line)
+
+        elif state == 8:
+            # store commit log message
+            if re_31.match(line):
+                state = 5
+                store = True
+            elif re_32.match(line):
+                state = 0
+                store = True
+            else:
+                e.comment.append(line)
+
+        # When a file is added on a branch B1, CVS creates a synthetic
+        # dead trunk revision 1.1 so that the branch has a root.
+        # Likewise, if you merge such a file to a later branch B2 (one
+        # that already existed when the file was added on B1), CVS
+        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
+        # these revisions now, but mark them synthetic so
+        # createchangeset() can take care of them.
+        if (store and
+              e.dead and
+              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
+              len(e.comment) == 1 and
+              file_added_re.match(e.comment[0])):
+            ui.debug('found synthetic revision in %s: %r\n'
+                     % (e.rcs, e.comment[0]))
+            e.synthetic = True
+
+        if store:
+            # clean up the results and save in the log.
+            store = False
+            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
+            e.comment = scache('\n'.join(e.comment))
+
+            # Even-length revisions longer than x.y are branch revisions;
+            # name the branch after the tag of the parent revision.
+            revn = len(e.revision)
+            if revn > 3 and (revn % 2) == 0:
+                e.branch = tags.get(e.revision[:-1], [None])[0]
+            else:
+                e.branch = None
+
+            # find the branches starting from this revision
+            branchpoints = set()
+            for branch, revision in branchmap.iteritems():
+                revparts = tuple([int(i) for i in revision.split('.')])
+                if len(revparts) < 2: # bad tags
+                    continue
+                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
+                    # normal branch
+                    if revparts[:-2] == e.revision:
+                        branchpoints.add(branch)
+                elif revparts == (1, 1, 1): # vendor branch
+                    if revparts in e.branches:
+                        branchpoints.add(branch)
+            e.branchpoints = branchpoints
+
+            log.append(e)
+
+            if len(log) % 100 == 0:
+                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
+
+    log.sort(key=lambda x: (x.rcs, x.revision))
+
+    # find parent revisions of individual files
+    versions = {}
+    for e in log:
+        branch = e.revision[:-1]
+        p = versions.get((e.rcs, branch), None)
+        if p is None:
+            p = e.revision[:-2]
+        e.parent = p
+        versions[(e.rcs, branch)] = e.revision
+
+    # update the log cache
+    if cache:
+        if log:
+            # join up the old and new logs
+            log.sort(key=lambda x: x.date)
+
+            if oldlog and oldlog[-1].date >= log[0].date:
+                raise logerror(_('log cache overlaps with new log entries,'
+                                 ' re-run without cache.'))
+
+            log = oldlog + log
+
+            # write the new cachefile
+            ui.note(_('writing cvs log cache %s\n') % cachefile)
+            pickle.dump(log, open(cachefile, 'w'))
+        else:
+            log = oldlog
+
+    ui.status(_('%d log entries\n') % len(log))
+
+    # Let extensions post-process the collected log.
+    hook.hook(ui, None, "cvslog", True, log=log)
+
+    return log
+
+
+class changeset(object):
+    '''Class changeset has the following attributes:
+        .id        - integer identifying this changeset (list index)
+        .author    - author name as CVS knows it
+        .branch    - name of branch this changeset is on, or None
+        .comment   - commit message
+        .date      - the commit date as a (time,tz) tuple
+        .entries   - list of logentry objects in this changeset
+        .parents   - list of one or two parent changesets
+        .tags      - list of tags on this changeset
+        .synthetic - from synthetic revision "file ... added on branch ..."
+        .mergepoint- the branch that has been merged from
+                     (if present in rlog output)
+        .branchpoints- the branches that start at the current entry
+    '''
+    def __init__(self, **entries):
+        # Attributes arrive as keyword arguments, mirroring logentry;
+        # only .synthetic has a default.
+        self.synthetic = False
+        self.__dict__.update(entries)
+
+    def __repr__(self):
+        return "<%s at 0x%x: %s>" % (self.__class__.__name__,
+                                     id(self),
+                                     getattr(self, 'id', "(no id)"))
+
+def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
+    '''Convert log into changesets.'''
+
+    ui.status(_('creating changesets\n'))
+
+    # Merge changesets
+
+    log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
+
+    changesets = []
+    files = set()
+    c = None
+    for i, e in enumerate(log):
+
+        # Check if log entry belongs to the current changeset or not.
+
+        # Since CVS is file centric, two different file revisions with
+        # different branchpoints should be treated as belonging to two
+        # different changesets (and the ordering is important and not
+        # honoured by cvsps at this point).
+        #
+        # Consider the following case:
+        # foo 1.1 branchpoints: [MYBRANCH]
+        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
+        #
+        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
+        # later version of foo may be in MYBRANCH2, so foo should be the
+        # first changeset and bar the next and MYBRANCH and MYBRANCH2
+        # should both start off of the bar changeset. No provisions are
+        # made to ensure that this is, in fact, what happens.
+        if not (c and
+                  e.comment == c.comment and
+                  e.author == c.author and
+                  e.branch == c.branch and
+                  (not hasattr(e, 'branchpoints') or
+                    not hasattr (c, 'branchpoints') or
+                    e.branchpoints == c.branchpoints) and
+                  ((c.date[0] + c.date[1]) <=
+                   (e.date[0] + e.date[1]) <=
+                   (c.date[0] + c.date[1]) + fuzz) and
+                  e.file not in files):
+            c = changeset(comment=e.comment, author=e.author,
+                          branch=e.branch, date=e.date, entries=[],
+                          mergepoint=getattr(e, 'mergepoint', None),
+                          branchpoints=getattr(e, 'branchpoints', set()))
+            changesets.append(c)
+            files = set()
+            if len(changesets) % 100 == 0:
+                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
+                ui.status(util.ellipsis(t, 80) + '\n')
+
+        c.entries.append(e)
+        files.add(e.file)
+        c.date = e.date       # changeset date is date of latest commit in it
+
+    # Mark synthetic changesets
+
+    for c in changesets:
+        # Synthetic revisions always get their own changeset, because
+        # the log message includes the filename.  E.g. if you add file3
+        # and file4 on a branch, you get four log entries and three
+        # changesets:
+        #   "File file3 was added on branch ..." (synthetic, 1 entry)
+        #   "File file4 was added on branch ..." (synthetic, 1 entry)
+        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
+        # Hence the check for 1 entry here.
+        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
+
+    # Sort files in each changeset
+
+    for c in changesets:
+        def pathcompare(l, r):
+            'Mimic cvsps sorting order'
+            l = l.split('/')
+            r = r.split('/')
+            nl = len(l)
+            nr = len(r)
+            n = min(nl, nr)
+            for i in range(n):
+                if i + 1 == nl and nl < nr:
+                    return -1
+                elif i + 1 == nr and nl > nr:
+                    return +1
+                elif l[i] < r[i]:
+                    return -1
+                elif l[i] > r[i]:
+                    return +1
+            return 0
+        def entitycompare(l, r):
+            return pathcompare(l.file, r.file)
+
+        c.entries.sort(entitycompare)
+
+    # Sort changesets by date
+
+    def cscmp(l, r):
+        d = sum(l.date) - sum(r.date)
+        if d:
+            return d
+
+        # detect vendor branches and initial commits on a branch
+        le = {}
+        for e in l.entries:
+            le[e.rcs] = e.revision
+        re = {}
+        for e in r.entries:
+            re[e.rcs] = e.revision
+
+        d = 0
+        for e in l.entries:
+            if re.get(e.rcs, None) == e.parent:
+                assert not d
+                d = 1
+                break
+
+        for e in r.entries:
+            if le.get(e.rcs, None) == e.parent:
+                assert not d
+                d = -1
+                break
+
+        return d
+
+    changesets.sort(cscmp)
+
+    # Collect tags
+
+    globaltags = {}
+    for c in changesets:
+        for e in c.entries:
+            for tag in e.tags:
+                # remember which is the latest changeset to have this tag
+                globaltags[tag] = c
+
+    for c in changesets:
+        tags = set()
+        for e in c.entries:
+            tags.update(e.tags)
+        # remember tags only if this is the latest changeset to have it
+        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
+
+    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
+    # by inserting dummy changesets with two parents, and handle
+    # {{mergefrombranch BRANCHNAME}} by setting two parents.
+
+    if mergeto is None:
+        mergeto = r'{{mergetobranch ([-\w]+)}}'
+    if mergeto:
+        mergeto = re.compile(mergeto)
+
+    if mergefrom is None:
+        mergefrom = r'{{mergefrombranch ([-\w]+)}}'
+    if mergefrom:
+        mergefrom = re.compile(mergefrom)
+
+    versions = {}    # changeset index where we saw any particular file version
+    branches = {}    # changeset index where we saw a branch
+    n = len(changesets)
+    i = 0
+    while i < n:
+        c = changesets[i]
+
+        for f in c.entries:
+            versions[(f.rcs, f.revision)] = i
+
+        p = None
+        if c.branch in branches:
+            p = branches[c.branch]
+        else:
+            # first changeset on a new branch
+            # the parent is a changeset with the branch in its
+            # branchpoints such that it is the latest possible
+            # commit without any intervening, unrelated commits.
+
+            for candidate in xrange(i):
+                if c.branch not in changesets[candidate].branchpoints:
+                    if p is not None:
+                        break
+                    continue
+                p = candidate
+
+        c.parents = []
+        if p is not None:
+            p = changesets[p]
+
+            # Ensure no changeset has a synthetic changeset as a parent.
+            while p.synthetic:
+                assert len(p.parents) <= 1, \
+                       _('synthetic changeset cannot have multiple parents')
+                if p.parents:
+                    p = p.parents[0]
+                else:
+                    p = None
+                    break
+
+            if p is not None:
+                c.parents.append(p)
+
+        if c.mergepoint:
+            if c.mergepoint == 'HEAD':
+                c.mergepoint = None
+            c.parents.append(changesets[branches[c.mergepoint]])
+
+        if mergefrom:
+            m = mergefrom.search(c.comment)
+            if m:
+                m = m.group(1)
+                if m == 'HEAD':
+                    m = None
+                try:
+                    candidate = changesets[branches[m]]
+                except KeyError:
+                    ui.warn(_("warning: CVS commit message references "
+                              "non-existent branch %r:\n%s\n")
+                            % (m, c.comment))
+                if m in branches and c.branch != m and not candidate.synthetic:
+                    c.parents.append(candidate)
+
+        if mergeto:
+            m = mergeto.search(c.comment)
+            if m:
+                try:
+                    m = m.group(1)
+                    if m == 'HEAD':
+                        m = None
+                except:
+                    m = None   # if no group found then merge to HEAD
+                if m in branches and c.branch != m:
+                    # insert empty changeset for merge
+                    cc = changeset(
+                        author=c.author, branch=m, date=c.date,
+                        comment='convert-repo: CVS merge from branch %s'
+                        % c.branch,
+                        entries=[], tags=[],
+                        parents=[changesets[branches[m]], c])
+                    changesets.insert(i + 1, cc)
+                    branches[m] = i + 1
+
+                    # adjust our loop counters now we have inserted a new entry
+                    n += 1
+                    i += 2
+                    continue
+
+        branches[c.branch] = i
+        i += 1
+
+    # Drop synthetic changesets (safe now that we have ensured no other
+    # changesets can have them as parents).
+    i = 0
+    while i < len(changesets):
+        if changesets[i].synthetic:
+            del changesets[i]
+        else:
+            i += 1
+
+    # Number changesets
+
+    for i, c in enumerate(changesets):
+        c.id = i + 1
+
+    ui.status(_('%d changeset entries\n') % len(changesets))
+
+    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
+
+    return changesets
+
+
def debugcvsps(ui, *args, **opts):
    '''Read CVS rlog for current directory or named path in
    repository, and convert the log to changesets based on matching
    commit log entries and dates.

    Output format mimics the cvsps tool, including its trailing
    whitespace quirks (see the bug-for-bug note below).
    '''
    # Translate the mutually exclusive cache options into the mode
    # string understood by createlog().
    if opts["new_cache"]:
        cache = "write"
    elif opts["update_cache"]:
        cache = "update"
    else:
        cache = None

    revisions = opts["revisions"]

    try:
        if args:
            # Named paths: concatenate the logs of all requested paths.
            log = []
            for d in args:
                log += createlog(ui, d, root=opts["root"], cache=cache)
        else:
            log = createlog(ui, root=opts["root"], cache=cache)
    except logerror, e:
        # Log-gathering failure: report and bail out instead of aborting.
        ui.write("%r\n"%e)
        return

    changesets = createchangeset(ui, log, opts["fuzz"])
    # The raw log can be large; drop it as soon as changesets are built.
    del log

    # Print changesets (optionally filtered)

    # 'off' is true while we are still skipping changesets that precede
    # the first revision named in --revisions; see the start-tag check
    # at the bottom of the loop.
    off = len(revisions)
    branches = {}    # latest version number in each branch
    ancestors = {}   # parent branch
    for cs in changesets:

        if opts["ancestors"]:
            # Record, once per branch, which changeset on which branch
            # this branch was forked from.
            if cs.branch not in branches and cs.parents and cs.parents[0].id:
                ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                        cs.parents[0].id)
            branches[cs.branch] = cs.id

        # limit by branches
        if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
            continue

        if not off:
            # Note: trailing spaces on several lines here are needed to have
            #       bug-for-bug compatibility with cvsps.
            ui.write('---------------------\n')
            ui.write('PatchSet %d \n' % cs.id)
            ui.write('Date: %s\n' % util.datestr(cs.date,
                                                 '%Y/%m/%d %H:%M:%S %1%2'))
            ui.write('Author: %s\n' % cs.author)
            ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
            ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                  ','.join(cs.tags) or '(none)'))
            branchpoints = getattr(cs, 'branchpoints', None)
            if branchpoints:
                ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
            if opts["parents"] and cs.parents:
                if len(cs.parents) > 1:
                    ui.write('Parents: %s\n' %
                             (','.join([str(p.id) for p in cs.parents])))
                else:
                    ui.write('Parent: %d\n' % cs.parents[0].id)

            if opts["ancestors"]:
                # Walk the ancestors map back to the trunk, collecting
                # branch:forkpoint:latest triples.
                b = cs.branch
                r = []
                while b:
                    b, c = ancestors[b]
                    r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
                if r:
                    ui.write('Ancestors: %s\n' % (','.join(r)))

            ui.write('Log:\n')
            ui.write('%s\n\n' % cs.comment)
            ui.write('Members: \n')
            for f in cs.entries:
                fn = f.file
                # Strip the user-supplied prefix from displayed paths.
                if fn.startswith(opts["prefix"]):
                    fn = fn[len(opts["prefix"]):]
                ui.write('\t%s:%s->%s%s \n' % (
                        fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
                        '.'.join([str(x) for x in f.revision]),
                        ['', '(DEAD)'][f.dead]))
            ui.write('\n')

        # have we seen the start tag?
        if revisions and off:
            if revisions[0] == str(cs.id) or \
                revisions[0] in cs.tags:
                off = False

        # see if we reached the end tag
        if len(revisions) > 1 and not off:
            if revisions[1] == str(cs.id) or \
                revisions[1] in cs.tags:
                break
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/darcs.py b/plugins/hg4idea/testData/bin/hgext/convert/darcs.py
new file mode 100644 (file)
index 0000000..4e46aa1
--- /dev/null
@@ -0,0 +1,167 @@
+# darcs.py - darcs support for the convert extension
+#
+#  Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from common import NoRepo, checktool, commandline, commit, converter_source
+from mercurial.i18n import _
+from mercurial import util
+import os, shutil, tempfile
+
+# The naming drift of ElementTree is fun!
+
+try:
+    from xml.etree.cElementTree import ElementTree
+except ImportError:
+    try:
+        from xml.etree.ElementTree import ElementTree
+    except ImportError:
+        try:
+            from elementtree.cElementTree import ElementTree
+        except ImportError:
+            try:
+                from elementtree.ElementTree import ElementTree
+            except ImportError:
+                ElementTree = None
+
class darcs_source(converter_source, commandline):
    '''Read a darcs repository as a conversion source.

    Patches are enumerated with 'darcs changes --xml-output' and then
    replayed one at a time into a private scratch repository
    (self.tmppath), from which file contents and the manifest are read
    back at each revision.
    '''
    def __init__(self, ui, path, rev=None):
        converter_source.__init__(self, ui, path, rev=rev)
        commandline.__init__(self, ui, 'darcs')

        # check for _darcs, ElementTree, _darcs/inventory so that we can
        # easily skip test-convert-darcs if ElementTree is not around
        if not os.path.exists(os.path.join(path, '_darcs', 'inventories')):
            raise NoRepo(_("%s does not look like a darcs repository") % path)

        # NOTE(review): redundant — if _darcs/inventories exists above,
        # _darcs necessarily exists too; this branch is unreachable.
        if not os.path.exists(os.path.join(path, '_darcs')):
            raise NoRepo(_("%s does not look like a darcs repository") % path)

        checktool('darcs')
        version = self.run0('--version').splitlines()[0].strip()
        # NOTE(review): lexicographic string comparison; a hypothetical
        # version '10.0' would compare below '2.1' — confirm acceptable.
        if version < '2.1':
            raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
                             version)

        if ElementTree is None:
            raise util.Abort(_("Python ElementTree module is not available"))

        self.path = os.path.realpath(path)

        self.lastrev = None   # last revision pulled into the scratch repo
        self.changes = {}     # patch hash -> <patch> XML element
        self.parents = {}     # patch hash -> list of parent hashes (see before)
        self.tags = {}        # tag name -> patch hash it points at

    def before(self):
        '''Create the scratch repository and index the patch list.'''
        self.tmppath = tempfile.mkdtemp(
            prefix='convert-' + os.path.basename(self.path) + '-')
        output, status = self.run('init', repodir=self.tmppath)
        self.checkexit(status)

        tree = self.xml('changes', xml_output=True, summary=True,
                        repodir=self.path)
        tagname = None
        child = None
        # Link each listed patch to the next one as its single parent;
        # the None key ends up holding the head (see getheads).
        for elt in tree.findall('patch'):
            node = elt.get('hash')
            name = elt.findtext('name', '')
            if name.startswith('TAG '):
                tagname = name[4:].strip()
            elif tagname is not None:
                # A TAG entry names the first non-TAG patch after it.
                self.tags[tagname] = node
                tagname = None
            self.changes[node] = elt
            self.parents[child] = [node]
            child = node
        self.parents[child] = []  # last patch in the list has no parent

    def after(self):
        # Remove the scratch repository; errors are deliberately ignored.
        self.ui.debug('cleaning up %s\n' % self.tmppath)
        shutil.rmtree(self.tmppath, ignore_errors=True)

    def xml(self, cmd, **kwargs):
        '''Run a darcs command and parse its XML output into an Element.'''
        etree = ElementTree()
        fp = self._run(cmd, **kwargs)
        etree.parse(fp)
        self.checkexit(fp.close())
        return etree.getroot()

    def manifest(self):
        '''Return the list of files tracked in the scratch repository.'''
        man = []
        output, status = self.run('show', 'files', no_directories=True,
                                  repodir=self.tmppath)
        self.checkexit(status)
        for line in output.split('\n'):
            # drop the first two characters of each line (presumably the
            # './' path prefix emitted by darcs — TODO confirm)
            path = line[2:]
            if path:
                man.append(path)
        return man

    def getheads(self):
        # parents[None] was set to the first-listed patch in before().
        return self.parents[None]

    def getcommit(self, rev):
        '''Build a commit object from the stored <patch> element for rev.'''
        elt = self.changes[rev]
        date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
        desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
        return commit(author=elt.get('author'), date=util.datestr(date),
                      desc=desc.strip(), parents=self.parents[rev])

    def pull(self, rev):
        '''Pull patch rev from the source into the scratch repository.'''
        output, status = self.run('pull', self.path, all=True,
                                  match='hash %s' % rev,
                                  no_test=True, no_posthook=True,
                                  external_merge='/bin/false',
                                  repodir=self.tmppath)
        if status:
            # Conflicts are tolerated: revert the working copy and carry
            # on; any other failure is fatal.
            if output.find('We have conflicts in') == -1:
                self.checkexit(status, output)
            output, status = self.run('revert', all=True, repodir=self.tmppath)
            self.checkexit(status, output)

    def getchanges(self, rev):
        '''Return (sorted changed files, copies dict) for revision rev.

        Also pulls rev into the scratch repository so getfile/getmode
        can read its content afterwards.
        '''
        copies = {}
        changes = []
        man = None
        for elt in self.changes[rev].find('summary').getchildren():
            # Directory-level adds/removes carry no file content.
            if elt.tag in ('add_directory', 'remove_directory'):
                continue
            if elt.tag == 'move':
                # Manifest is only needed (and fetched) for moves.
                if man is None:
                    man = self.manifest()
                source, dest = elt.get('from'), elt.get('to')
                if source in man:
                    # File move
                    changes.append((source, rev))
                    changes.append((dest, rev))
                    copies[dest] = source
                else:
                    # Directory move, deduce file moves from manifest
                    source = source + '/'
                    for f in man:
                        if not f.startswith(source):
                            continue
                        fdest = dest + '/' + f[len(source):]
                        changes.append((f, rev))
                        changes.append((fdest, rev))
                        copies[fdest] = f
            else:
                changes.append((elt.text.strip(), rev))
        self.pull(rev)
        self.lastrev = rev
        return sorted(changes), copies

    def getfile(self, name, rev):
        # Content can only be read for the revision last pulled into the
        # scratch repository by getchanges().
        if rev != self.lastrev:
            raise util.Abort(_('internal calling inconsistency'))
        return open(os.path.join(self.tmppath, name), 'rb').read()

    def getmode(self, name, rev):
        # 0111 (octal): any execute bit set maps to the 'x' flag.
        mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
        return (mode & 0111) and 'x' or ''

    def gettags(self):
        return self.tags
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/filemap.py b/plugins/hg4idea/testData/bin/hgext/convert/filemap.py
new file mode 100644 (file)
index 0000000..e09a991
--- /dev/null
@@ -0,0 +1,359 @@
+# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
+# Copyright 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import shlex
+from mercurial.i18n import _
+from mercurial import util
+from common import SKIPREV, converter_source
+
def rpairs(name):
    '''Yield (prefix, suffix) splits of a '/'-separated path.

    Splits are produced right to left: first the whole name with an
    empty suffix, then each ancestor directory paired with the rest of
    the path, and finally ('.', name).
    '''
    cut = len(name)
    while True:
        yield name[:cut], name[cut + 1:]
        cut = name.rfind('/', 0, cut)
        if cut == -1:
            break
    yield '.', name
+
class filemapper(object):
    '''Map and filter filenames when importing.
    A name can be mapped to itself, a new name, or None (omit from new
    repository).'''

    def __init__(self, ui, path=None):
        self.ui = ui
        self.include = {}   # included path prefixes (prefix -> prefix)
        self.exclude = {}   # excluded path prefixes
        self.rename = {}    # source prefix -> replacement prefix
        if path:
            if self.parse(path):
                raise util.Abort(_('errors in filemap'))

    def parse(self, path):
        '''Parse the filemap file at path; return the error count.'''
        errs = 0
        def check(name, mapping, listname):
            # Warn about a name appearing in conflicting directive lists.
            if name in mapping:
                self.ui.warn(_('%s:%d: %r already in %s list\n') %
                             (lex.infile, lex.lineno, name, listname))
                return 1
            return 0
        lex = shlex.shlex(open(path), path, True)
        # Let most punctuation appear inside unquoted path tokens.
        lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
        cmd = lex.get_token()
        while cmd:
            if cmd == 'include':
                name = lex.get_token()
                errs += check(name, self.exclude, 'exclude')
                self.include[name] = name
            elif cmd == 'exclude':
                name = lex.get_token()
                errs += check(name, self.include, 'include')
                errs += check(name, self.rename, 'rename')
                self.exclude[name] = name
            elif cmd == 'rename':
                src = lex.get_token()
                dest = lex.get_token()
                errs += check(src, self.exclude, 'exclude')
                self.rename[src] = dest
            elif cmd == 'source':
                # Recursively parse another filemap file.
                errs += self.parse(lex.get_token())
            else:
                self.ui.warn(_('%s:%d: unknown directive %r\n') %
                             (lex.infile, lex.lineno, cmd))
                errs += 1
            cmd = lex.get_token()
        return errs

    def lookup(self, name, mapping):
        '''Find the longest path-prefix of name that is in mapping.

        Returns (mapped value, matched prefix, remaining suffix), or
        ('', name, '') when no prefix matches.
        '''
        for pre, suf in rpairs(name):
            try:
                return mapping[pre], pre, suf
            except KeyError:
                pass
        return '', name, ''

    def __call__(self, name):
        '''Map name through include/exclude/rename; None means omit it.'''
        if name in self.include:  # presumed hot path is the lookup below
            pass
        if self.include:
            inc = self.lookup(name, self.include)[0]
        else:
            # No include list: everything is included by default.
            inc = name
        if self.exclude:
            exc = self.lookup(name, self.exclude)[0]
        else:
            exc = ''
        # Omit when excluded outright, or when the matching exclude
        # prefix is at least as long (specific) as the include match.
        if (not self.include and exc) or (len(inc) <= len(exc)):
            return None
        newpre, pre, suf = self.lookup(name, self.rename)
        if newpre:
            if newpre == '.':
                # Renaming a prefix to '.' strips that prefix entirely.
                return suf
            if suf:
                return newpre + '/' + suf
            return newpre
        return name

    def active(self):
        '''True if this filemap filters or renames anything at all.'''
        return bool(self.include or self.exclude or self.rename)
+
+# This class does two additional things compared to a regular source:
+#
+# - Filter and rename files.  This is mostly wrapped by the filemapper
+#   class above. We hide the original filename in the revision that is
+#   returned by getchanges to be able to find things later in getfile
+#   and getmode.
+#
+# - Return only revisions that matter for the files we're interested in.
+#   This involves rewriting the parents of the original revision to
+#   create a graph that is restricted to those revisions.
+#
+#   This set of revisions includes not only revisions that directly
+#   touch files we're interested in, but also merges that merge two
+#   or more interesting revisions.
+
class filemap_source(converter_source):
    '''converter_source wrapper that filters files and prunes revisions.

    Revisions that touch none of the wanted files (and are not merges
    of wanted revisions) are not converted; instead they are mapped to
    their nearest wanted ancestor via self.parentmap.
    '''
    def __init__(self, ui, baseconverter, filemap):
        super(filemap_source, self).__init__(ui)
        self.base = baseconverter              # the wrapped source
        self.filemapper = filemapper(ui, filemap)
        self.commits = {}                      # rev -> cached commit object
        # if a revision rev has parent p in the original revision graph, then
        # rev will have parent self.parentmap[p] in the restricted graph.
        self.parentmap = {}
        # self.wantedancestors[rev] is the set of all ancestors of rev that
        # are in the restricted graph.
        self.wantedancestors = {}
        self.convertedorder = None             # [(rev, wanted, arg)] history
        self._rebuilt = False
        self.origparents = {}                  # rev -> original parent list
        self.children = {}                     # rev -> known child count
        self.seenchildren = {}                 # rev -> children processed

    def before(self):
        self.base.before()

    def after(self):
        self.base.after()

    def setrevmap(self, revmap):
        # rebuild our state to make things restartable
        #
        # To avoid calling getcommit for every revision that has already
        # been converted, we rebuild only the parentmap, delaying the
        # rebuild of wantedancestors until we need it (i.e. until a
        # merge).
        #
        # We assume the order argument lists the revisions in
        # topological order, so that we can infer which revisions were
        # wanted by previous runs.
        self._rebuilt = not revmap
        seen = {SKIPREV: SKIPREV}
        # Shared placeholder; the real per-rev sets are computed lazily
        # by rebuild() when a merge makes them necessary.
        dummyset = set()
        converted = []
        for rev in revmap.order:
            mapped = revmap[rev]
            # A revision was "wanted" iff it is the first one mapped to
            # this target revision.
            wanted = mapped not in seen
            if wanted:
                seen[mapped] = rev
                self.parentmap[rev] = rev
            else:
                self.parentmap[rev] = seen[mapped]
            self.wantedancestors[rev] = dummyset
            arg = seen[mapped]
            if arg == SKIPREV:
                arg = None
            converted.append((rev, wanted, arg))
        self.convertedorder = converted
        return self.base.setrevmap(revmap)

    def rebuild(self):
        '''Recompute parentmap/wantedancestors from convertedorder.'''
        if self._rebuilt:
            return True
        self._rebuilt = True
        self.parentmap.clear()
        self.wantedancestors.clear()
        self.seenchildren.clear()
        # First pass: gather original parents and child counts.
        for rev, wanted, arg in self.convertedorder:
            if rev not in self.origparents:
                self.origparents[rev] = self.getcommit(rev).parents
            if arg is not None:
                self.children[arg] = self.children.get(arg, 0) + 1

        # Second pass: replay the wanted/not-wanted decisions, pruning
        # per-revision state as soon as it can no longer be needed.
        for rev, wanted, arg in self.convertedorder:
            parents = self.origparents[rev]
            if wanted:
                self.mark_wanted(rev, parents)
            else:
                self.mark_not_wanted(rev, arg)
            self._discard(arg, *parents)

        return True

    def getheads(self):
        return self.base.getheads()

    def getcommit(self, rev):
        # We want to save a reference to the commit objects to be able
        # to rewrite their parents later on.
        c = self.commits[rev] = self.base.getcommit(rev)
        for p in c.parents:
            self.children[p] = self.children.get(p, 0) + 1
        return c

    def _discard(self, *revs):
        # Drop per-revision bookkeeping once every known child of a
        # revision has been processed, so the maps don't grow unbounded.
        for r in revs:
            if r is None:
                continue
            self.seenchildren[r] = self.seenchildren.get(r, 0) + 1
            if self.seenchildren[r] == self.children[r]:
                del self.wantedancestors[r]
                del self.parentmap[r]
                del self.seenchildren[r]
                if self._rebuilt:
                    del self.children[r]

    def wanted(self, rev, i):
        # Return True if we're directly interested in rev.
        #
        # i is an index selecting one of the parents of rev (if rev
        # has no parents, i is None).  getchangedfiles will give us
        # the list of files that are different in rev and in the parent
        # indicated by i.  If we're interested in any of these files,
        # we're interested in rev.
        try:
            files = self.base.getchangedfiles(rev, i)
        except NotImplementedError:
            raise util.Abort(_("source repository doesn't support --filemap"))
        for f in files:
            if self.filemapper(f):
                return True
        return False

    def mark_not_wanted(self, rev, p):
        # Mark rev as not interesting and update data structures.

        if p is None:
            # A root revision. Use SKIPREV to indicate that it doesn't
            # map to any revision in the restricted graph.  Put SKIPREV
            # in the set of wanted ancestors to simplify code elsewhere
            self.parentmap[rev] = SKIPREV
            self.wantedancestors[rev] = set((SKIPREV,))
            return

        # Reuse the data from our parent.
        self.parentmap[rev] = self.parentmap[p]
        self.wantedancestors[rev] = self.wantedancestors[p]

    def mark_wanted(self, rev, parents):
        # Mark rev as wanted and update data structures.

        # rev will be in the restricted graph, so children of rev in
        # the original graph should still have rev as a parent in the
        # restricted graph.
        self.parentmap[rev] = rev

        # The set of wanted ancestors of rev is the union of the sets
        # of wanted ancestors of its parents. Plus rev itself.
        wrev = set()
        for p in parents:
            wrev.update(self.wantedancestors[p])
        wrev.add(rev)
        self.wantedancestors[rev] = wrev

    def getchanges(self, rev):
        '''Return rev's filtered changes, or the mapped revision when
        rev itself is pruned from the restricted graph.'''
        parents = self.commits[rev].parents
        if len(parents) > 1:
            # Merges need the full wantedancestors data (see setrevmap).
            self.rebuild()

        # To decide whether we're interested in rev we:
        #
        # - calculate what parents rev will have if it turns out we're
        #   interested in it.  If it's going to have more than 1 parent,
        #   we're interested in it.
        #
        # - otherwise, we'll compare it with the single parent we found.
        #   If any of the files we're interested in is different in
        #   the two revisions, we're interested in rev.

        # A parent p is interesting if its mapped version (self.parentmap[p]):
        # - is not SKIPREV
        # - is still not in the list of parents (we don't want duplicates)
        # - is not an ancestor of the mapped versions of the other parents
        mparents = []
        wp = None
        for i, p1 in enumerate(parents):
            mp1 = self.parentmap[p1]
            if mp1 == SKIPREV or mp1 in mparents:
                continue
            for p2 in parents:
                if p1 == p2 or mp1 == self.parentmap[p2]:
                    continue
                if mp1 in self.wantedancestors[p2]:
                    break
            else:
                mparents.append(mp1)
                wp = i

        if wp is None and parents:
            wp = 0

        self.origparents[rev] = parents

        if len(mparents) < 2 and not self.wanted(rev, wp):
            # We don't want this revision.
            # Update our state and tell the convert process to map this
            # revision to the same revision its parent as mapped to.
            p = None
            if parents:
                p = parents[wp]
            self.mark_not_wanted(rev, p)
            self.convertedorder.append((rev, False, p))
            self._discard(*parents)
            return self.parentmap[rev]

        # We want this revision.
        # Rewrite the parents of the commit object
        self.commits[rev].parents = mparents
        self.mark_wanted(rev, parents)
        self.convertedorder.append((rev, True, None))
        self._discard(*parents)

        # Get the real changes and do the filtering/mapping.
        # To be able to get the files later on in getfile and getmode,
        # we hide the original filename in the rev part of the return
        # value.
        changes, copies = self.base.getchanges(rev)
        newnames = {}
        files = []
        for f, r in changes:
            newf = self.filemapper(f)
            if newf:
                files.append((newf, (f, r)))
                newnames[f] = newf

        # Keep a copy only when both endpoints survive the filemap.
        ncopies = {}
        for c in copies:
            newc = self.filemapper(c)
            if newc:
                newsource = self.filemapper(copies[c])
                if newsource:
                    ncopies[newc] = newsource

        return files, ncopies

    def getfile(self, name, rev):
        # rev is the (original name, original rev) pair hidden by
        # getchanges above.
        realname, realrev = rev
        return self.base.getfile(realname, realrev)

    def getmode(self, name, rev):
        # Same hidden-tuple convention as getfile.
        realname, realrev = rev
        return self.base.getmode(realname, realrev)

    def gettags(self):
        return self.base.gettags()

    def hasnativeorder(self):
        return self.base.hasnativeorder()

    def lookuprev(self, rev):
        return self.base.lookuprev(rev)
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/git.py b/plugins/hg4idea/testData/bin/hgext/convert/git.py
new file mode 100644 (file)
index 0000000..ed08f01
--- /dev/null
@@ -0,0 +1,170 @@
+# git.py - git support for the convert extension
+#
+#  Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+from mercurial import util
+from mercurial.i18n import _
+
+from common import NoRepo, commit, converter_source, checktool
+
+class convert_git(converter_source):
+    """Convert source reading history from a local Git repository.
+
+    All repository access is done by shelling out to the 'git'
+    command-line tool through util.popen.
+    """
+    # Windows does not support GIT_DIR= construct while other systems
+    # cannot remove environment variable. Just assume none have
+    # both issues.
+    if hasattr(os, 'unsetenv'):
+        def gitopen(self, s):
+            # Run git command 's' with GIT_DIR pointing at our repository,
+            # restoring the caller's environment afterwards.
+            prevgitdir = os.environ.get('GIT_DIR')
+            os.environ['GIT_DIR'] = self.path
+            try:
+                return util.popen(s, 'rb')
+            finally:
+                if prevgitdir is None:
+                    del os.environ['GIT_DIR']
+                else:
+                    os.environ['GIT_DIR'] = prevgitdir
+    else:
+        def gitopen(self, s):
+            # No unsetenv available: pass GIT_DIR on the command line.
+            return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
+
+    def gitread(self, s):
+        # Run command 's' and return (full output, exit status from close()).
+        fh = self.gitopen(s)
+        data = fh.read()
+        return data, fh.close()
+
+    def __init__(self, ui, path, rev=None):
+        super(convert_git, self).__init__(ui, path, rev=rev)
+
+        # Accept either a working copy (with a .git subdirectory) or a
+        # bare repository path (must contain an objects/ directory).
+        if os.path.isdir(path + "/.git"):
+            path += "/.git"
+        if not os.path.exists(path + "/objects"):
+            raise NoRepo(_("%s does not look like a Git repository") % path)
+
+        checktool('git', 'git')
+
+        self.path = path
+
+    def getheads(self):
+        # Heads to convert: every branch and remote tip, or just the
+        # single revision requested with --rev.
+        if not self.rev:
+            heads, ret = self.gitread('git rev-parse --branches --remotes')
+            heads = heads.splitlines()
+        else:
+            heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
+            heads = [heads[:-1]]
+        if ret:
+            raise util.Abort(_('cannot retrieve git heads'))
+        return heads
+
+    def catfile(self, rev, type):
+        # Fetch a raw git object.  The all-zero hash means "no object";
+        # signal that to callers as IOError.
+        if rev == "0" * 40:
+            raise IOError()
+        data, ret = self.gitread("git cat-file %s %s" % (type, rev))
+        if ret:
+            raise util.Abort(_('cannot read %r object at %s') % (type, rev))
+        return data
+
+    def getfile(self, name, rev):
+        # 'rev' is the blob hash recorded by getchanges(), not a commit id.
+        return self.catfile(rev, "blob")
+
+    def getmode(self, name, rev):
+        # Modes were cached by the preceding getchanges() call.
+        return self.modecache[(name, rev)]
+
+    def getchanges(self, version):
+        # Return (file, blob hash) pairs changed in 'version', caching each
+        # file's mode flag ('x', 'l' or '') for later getmode() calls.
+        self.modecache = {}
+        fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
+        changes = []
+        seen = set()
+        entry = None
+        # diff-tree -z output alternates NUL-separated metadata entries
+        # (starting with ':') and filenames.
+        for l in fh.read().split('\x00'):
+            if not entry:
+                if not l.startswith(':'):
+                    continue
+                entry = l
+                continue
+            f = l
+            if f not in seen:
+                seen.add(f)
+                entry = entry.split()
+                h = entry[3]
+                p = (entry[1] == "100755")
+                s = (entry[1] == "120000")
+                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
+                changes.append((f, h))
+            entry = None
+        if fh.close():
+            raise util.Abort(_('cannot read changes in %s') % version)
+        return (changes, {})
+
+    def getcommit(self, version):
+        # Parse a raw commit object into a common.commit instance.
+        c = self.catfile(version, "commit") # read the commit hash
+        end = c.find("\n\n")
+        message = c[end + 2:]
+        message = self.recode(message)
+        l = c[:end].splitlines()
+        parents = []
+        author = committer = None
+        for e in l[1:]:
+            n, v = e.split(" ", 1)
+            if n == "author":
+                p = v.split()
+                tm, tz = p[-2:]
+                author = " ".join(p[:-2])
+                if author[0] == "<": author = author[1:-1]
+                author = self.recode(author)
+            if n == "committer":
+                p = v.split()
+                tm, tz = p[-2:]
+                committer = " ".join(p[:-2])
+                if committer[0] == "<": committer = committer[1:-1]
+                committer = self.recode(committer)
+            if n == "parent":
+                parents.append(v)
+
+        # Keep the committer visible when it differs from the author.
+        if committer and committer != author:
+            message += "\ncommitter: %s\n" % committer
+        # Convert git's signed "+HHMM"/"-HHMM" timezone into Mercurial's
+        # offset-in-seconds convention.
+        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
+        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
+        date = tm + " " + str(tz)
+
+        c = commit(parents=parents, date=date, author=author, desc=message,
+                   rev=version)
+        return c
+
+    def gettags(self):
+        # Map tag name -> commit id, using only the dereferenced "^{}"
+        # entries so annotated tags point at commits, not tag objects.
+        tags = {}
+        fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
+        prefix = 'refs/tags/'
+        for line in fh:
+            line = line.strip()
+            if not line.endswith("^{}"):
+                continue
+            node, tag = line.split(None, 1)
+            if not tag.startswith(prefix):
+                continue
+            tag = tag[len(prefix):-3]
+            tags[tag] = node
+        if fh.close():
+            raise util.Abort(_('cannot read tags from %s') % self.path)
+
+        return tags
+
+    def getchangedfiles(self, version, i):
+        # Files touched by 'version' relative to its (i+1)-th parent, or
+        # relative to all parents when i is None.
+        changes = []
+        if i is None:
+            fh = self.gitopen("git diff-tree --root -m -r %s" % version)
+            for l in fh:
+                if "\t" not in l:
+                    continue
+                m, f = l[:-1].split("\t")
+                changes.append(f)
+        else:
+            fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
+                             % (version, version, i + 1))
+            changes = [f.rstrip('\n') for f in fh]
+        if fh.close():
+            raise util.Abort(_('cannot read changes in %s') % version)
+
+        return changes
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/gnuarch.py b/plugins/hg4idea/testData/bin/hgext/convert/gnuarch.py
new file mode 100644 (file)
index 0000000..2727772
--- /dev/null
@@ -0,0 +1,346 @@
+# gnuarch.py - GNU Arch support for the convert extension
+#
+#  Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
+#  and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from common import NoRepo, commandline, commit, converter_source
+from mercurial.i18n import _
+from mercurial import util
+import os, shutil, tempfile, stat, locale
+from email.Parser import Parser
+
+class gnuarch_source(converter_source, commandline):
+    """Convert source for GNU Arch repositories, driven via baz or tla."""
+
+    class gnuarch_rev(object):
+        # Record of a single Arch revision: metadata plus the file
+        # changes parsed from replay/changeset output.
+        def __init__(self, rev):
+            self.rev = rev
+            self.summary = ''
+            self.date = None
+            self.author = ''
+            # Revision this one continues (branch/tag origin), if any.
+            self.continuationof = None
+            self.add_files = []
+            self.mod_files = []
+            self.del_files = []
+            self.ren_files = {}
+            self.ren_dirs = {}
+
+    def __init__(self, ui, path, rev=None):
+        super(gnuarch_source, self).__init__(ui, path, rev=rev)
+
+        if not os.path.exists(os.path.join(path, '{arch}')):
+            raise NoRepo(_("%s does not look like a GNU Arch repository")
+                         % path)
+
+        # Could use checktool, but we want to check for baz or tla.
+        self.execmd = None
+        if util.find_exe('baz'):
+            self.execmd = 'baz'
+        else:
+            if util.find_exe('tla'):
+                self.execmd = 'tla'
+            else:
+                raise util.Abort(_('cannot find a GNU Arch tool'))
+
+        commandline.__init__(self, ui, self.execmd)
+
+        self.path = os.path.realpath(path)
+        # Temporary working tree, created lazily in before().
+        self.tmppath = None
+
+        self.treeversion = None
+        self.lastrev = None
+        self.changes = {}
+        self.parents = {}
+        self.tags = {}
+        self.modecache = {}
+        # cat-log output is RFC822-style; reuse the email parser.
+        self.catlogparser = Parser()
+        self.locale = locale.getpreferredencoding()
+        self.archives = []
+
+    def before(self):
+        # Build the revision graph by walking tree versions backwards,
+        # following continuation-of headers across branches/tags.
+        # Get registered archives
+        self.archives = [i.rstrip('\n')
+                         for i in self.runlines0('archives', '-n')]
+
+        if self.execmd == 'tla':
+            output = self.run0('tree-version', self.path)
+        else:
+            output = self.run0('tree-version', '-d', self.path)
+        self.treeversion = output.strip()
+
+        # Get name of temporary directory
+        version = self.treeversion.split('/')
+        self.tmppath = os.path.join(tempfile.gettempdir(),
+                                    'hg-%s' % version[1])
+
+        # Generate parents dictionary
+        self.parents[None] = []
+        treeversion = self.treeversion
+        child = None
+        while treeversion:
+            self.ui.status(_('analyzing tree version %s...\n') % treeversion)
+
+            archive = treeversion.split('/')[0]
+            if archive not in self.archives:
+                self.ui.status(_('tree analysis stopped because it points to '
+                                 'an unregistered archive %s...\n') % archive)
+                break
+
+            # Get the complete list of revisions for that tree version
+            output, status = self.runlines('revisions', '-r', '-f', treeversion)
+            self.checkexit(status, 'failed retrieveing revisions for %s'
+                           % treeversion)
+
+            # No new iteration unless a revision has a continuation-of header
+            treeversion = None
+
+            for l in output:
+                rev = l.strip()
+                self.changes[rev] = self.gnuarch_rev(rev)
+                self.parents[rev] = []
+
+                # Read author, date and summary
+                catlog, status = self.run('cat-log', '-d', self.path, rev)
+                if status:
+                    catlog  = self.run0('cat-archive-log', rev)
+                self._parsecatlog(catlog, rev)
+
+                # Populate the parents map
+                self.parents[child].append(rev)
+
+                # Keep track of the current revision as the child of the next
+                # revision scanned
+                child = rev
+
+                # Check if we have to follow the usual incremental history
+                # or if we have to 'jump' to a different treeversion given
+                # by the continuation-of header.
+                if self.changes[rev].continuationof:
+                    treeversion = '--'.join(
+                        self.changes[rev].continuationof.split('--')[:-1])
+                    break
+
+                # If we reached a base-0 revision w/o any continuation-of
+                # header, it means the tree history ends here.
+                if rev[-6:] == 'base-0':
+                    break
+
+    def after(self):
+        # Remove the temporary tree used for replaying revisions.
+        self.ui.debug('cleaning up %s\n' % self.tmppath)
+        shutil.rmtree(self.tmppath, ignore_errors=True)
+
+    def getheads(self):
+        # Heads are the children of the synthetic None root.
+        return self.parents[None]
+
+    def getfile(self, name, rev):
+        # Only the most recently replayed revision exists on disk, so
+        # out-of-order access is a programming error.
+        if rev != self.lastrev:
+            raise util.Abort(_('internal calling inconsistency'))
+
+        # Raise IOError if necessary (i.e. deleted files).
+        if not os.path.exists(os.path.join(self.tmppath, name)):
+            raise IOError
+
+        data, mode = self._getfile(name, rev)
+        self.modecache[(name, rev)] = mode
+
+        return data
+
+    def getmode(self, name, rev):
+        # Modes are cached by getfile() for the current revision.
+        return self.modecache[(name, rev)]
+
+    def getchanges(self, rev):
+        # Apply 'rev' to the working tree and report changed files and
+        # copies; renames become (source + destination + copy record).
+        self.modecache = {}
+        self._update(rev)
+        changes = []
+        copies = {}
+
+        for f in self.changes[rev].add_files:
+            changes.append((f, rev))
+
+        for f in self.changes[rev].mod_files:
+            changes.append((f, rev))
+
+        for f in self.changes[rev].del_files:
+            changes.append((f, rev))
+
+        for src in self.changes[rev].ren_files:
+            to = self.changes[rev].ren_files[src]
+            changes.append((src, rev))
+            changes.append((to, rev))
+            copies[to] = src
+
+        for src in self.changes[rev].ren_dirs:
+            to = self.changes[rev].ren_dirs[src]
+            chgs, cps = self._rendirchanges(src, to)
+            changes += [(f, rev) for f in chgs]
+            copies.update(cps)
+
+        self.lastrev = rev
+        return sorted(set(changes)), copies
+
+    def getcommit(self, rev):
+        # Assemble a common.commit from the metadata gathered in before().
+        changes = self.changes[rev]
+        return commit(author=changes.author, date=changes.date,
+                      desc=changes.summary, parents=self.parents[rev], rev=rev)
+
+    def gettags(self):
+        # NOTE(review): self.tags is initialized empty and never filled
+        # in this class — confirm tags are expected to be empty here.
+        return self.tags
+
+    def _execute(self, cmd, *args, **kwargs):
+        # Run the arch tool with stdout/stderr discarded; returns the
+        # os.system() exit status.
+        cmdline = [self.execmd, cmd]
+        cmdline += args
+        cmdline = [util.shellquote(arg) for arg in cmdline]
+        cmdline += ['>', util.nulldev, '2>', util.nulldev]
+        cmdline = util.quotecommand(' '.join(cmdline))
+        self.ui.debug(cmdline, '\n')
+        return os.system(cmdline)
+
+    def _update(self, rev):
+        # Bring the temporary tree up to 'rev', replaying incrementally
+        # when possible and falling back to a full checkout on failure.
+        self.ui.debug('applying revision %s...\n' % rev)
+        changeset, status = self.runlines('replay', '-d', self.tmppath,
+                                              rev)
+        if status:
+            # Something went wrong while merging (baz or tla
+            # issue?), get latest revision and try from there
+            shutil.rmtree(self.tmppath, ignore_errors=True)
+            self._obtainrevision(rev)
+        else:
+            old_rev = self.parents[rev][0]
+            self.ui.debug('computing changeset between %s and %s...\n'
+                          % (old_rev, rev))
+            self._parsechangeset(changeset, rev)
+
+    def _getfile(self, name, rev):
+        # Read file data and mode flag ('l' symlink, 'x' executable, '')
+        # from the checked-out temporary tree.
+        mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
+        if stat.S_ISLNK(mode):
+            data = os.readlink(os.path.join(self.tmppath, name))
+            mode = mode and 'l' or ''
+        else:
+            data = open(os.path.join(self.tmppath, name), 'rb').read()
+            mode = (mode & 0111) and 'x' or ''
+        return data, mode
+
+    def _exclude(self, name):
+        # True for paths containing GNU Arch control files/directories,
+        # which must not be converted.
+        exclude = ['{arch}', '.arch-ids', '.arch-inventory']
+        for exc in exclude:
+            if name.find(exc) != -1:
+                return True
+        return False
+
+    def _readcontents(self, path):
+        # Recursively list files under 'path', skipping Arch internals.
+        files = []
+        contents = os.listdir(path)
+        while len(contents) > 0:
+            c = contents.pop()
+            p = os.path.join(path, c)
+            # os.walk could be used, but here we avoid internal GNU
+            # Arch files and directories, thus saving a lot of time.
+            if not self._exclude(p):
+                if os.path.isdir(p):
+                    contents += [os.path.join(c, f) for f in os.listdir(p)]
+                else:
+                    files.append(c)
+        return files
+
+    def _rendirchanges(self, src, dest):
+        # Expand a directory rename into per-file changes and copies.
+        changes = []
+        copies = {}
+        files = self._readcontents(os.path.join(self.tmppath, dest))
+        for f in files:
+            s = os.path.join(src, f)
+            d = os.path.join(dest, f)
+            changes.append(s)
+            changes.append(d)
+            copies[d] = s
+        return changes, copies
+
+    def _obtainrevision(self, rev):
+        # Full-checkout fallback: fetch 'rev' from scratch and treat the
+        # whole tree content as added files.
+        self.ui.debug('obtaining revision %s...\n' % rev)
+        output = self._execute('get', rev, self.tmppath)
+        self.checkexit(output)
+        self.ui.debug('analyzing revision %s...\n' % rev)
+        files = self._readcontents(self.tmppath)
+        self.changes[rev].add_files += files
+
+    def _stripbasepath(self, path):
+        # Drop a leading './' so paths are repository-relative.
+        if path.startswith('./'):
+            return path[2:]
+        return path
+
+    def _parsecatlog(self, data, rev):
+        # Parse cat-log (RFC822-style) output into the gnuarch_rev entry.
+        try:
+            catlog = self.catlogparser.parsestr(data)
+
+            # Commit date
+            self.changes[rev].date = util.datestr(
+                util.strdate(catlog['Standard-date'],
+                             '%Y-%m-%d %H:%M:%S'))
+
+            # Commit author
+            self.changes[rev].author = self.recode(catlog['Creator'])
+
+            # Commit description
+            self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
+                                                    catlog.get_payload()))
+            self.changes[rev].summary = self.recode(self.changes[rev].summary)
+
+            # Commit revision origin when dealing with a branch or tag
+            if 'Continuation-of' in catlog:
+                self.changes[rev].continuationof = self.recode(
+                    catlog['Continuation-of'])
+        except Exception:
+            raise util.Abort(_('could not parse cat-log of %s') % rev)
+
+    def _parsechangeset(self, data, rev):
+        # Classify each line of changeset output into adds, deletes,
+        # modifications and renames for the gnuarch_rev entry.
+        for l in data:
+            l = l.strip()
+            # Added file (ignore added directory)
+            if l.startswith('A') and not l.startswith('A/'):
+                file = self._stripbasepath(l[1:].strip())
+                if not self._exclude(file):
+                    self.changes[rev].add_files.append(file)
+            # Deleted file (ignore deleted directory)
+            elif l.startswith('D') and not l.startswith('D/'):
+                file = self._stripbasepath(l[1:].strip())
+                if not self._exclude(file):
+                    self.changes[rev].del_files.append(file)
+            # Modified binary file
+            elif l.startswith('Mb'):
+                file = self._stripbasepath(l[2:].strip())
+                if not self._exclude(file):
+                    self.changes[rev].mod_files.append(file)
+            # Modified link
+            elif l.startswith('M->'):
+                file = self._stripbasepath(l[3:].strip())
+                if not self._exclude(file):
+                    self.changes[rev].mod_files.append(file)
+            # Modified file
+            elif l.startswith('M'):
+                file = self._stripbasepath(l[1:].strip())
+                if not self._exclude(file):
+                    self.changes[rev].mod_files.append(file)
+            # Renamed file (or link)
+            elif l.startswith('=>'):
+                files = l[2:].strip().split(' ')
+                if len(files) == 1:
+                    files = l[2:].strip().split('\t')
+                src = self._stripbasepath(files[0])
+                dst = self._stripbasepath(files[1])
+                if not self._exclude(src) and not self._exclude(dst):
+                    self.changes[rev].ren_files[src] = dst
+            # Conversion from file to link or from link to file (modified)
+            elif l.startswith('ch'):
+                file = self._stripbasepath(l[2:].strip())
+                if not self._exclude(file):
+                    self.changes[rev].mod_files.append(file)
+            # Renamed directory
+            elif l.startswith('/>'):
+                dirs = l[2:].strip().split(' ')
+                if len(dirs) == 1:
+                    dirs = l[2:].strip().split('\t')
+                src = self._stripbasepath(dirs[0])
+                dst = self._stripbasepath(dirs[1])
+                if not self._exclude(src) and not self._exclude(dst):
+                    self.changes[rev].ren_dirs[src] = dst
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/hg.py b/plugins/hg4idea/testData/bin/hgext/convert/hg.py
new file mode 100644 (file)
index 0000000..2a3d137
--- /dev/null
@@ -0,0 +1,377 @@
+# hg.py - hg backend for convert extension
+#
+#  Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# Notes for hg->hg conversion:
+#
+# * Old versions of Mercurial didn't trim the whitespace from the ends
+#   of commit messages, but new versions do.  Changesets created by
+#   those older versions, then converted, may thus have different
+#   hashes for changesets that are otherwise identical.
+#
+# * Using "--config convert.hg.saverev=true" will make the source
+#   identifier to be stored in the converted revision. This will cause
+#   the converted revision to have a different identity than the
+#   source.
+
+
+import os, time, cStringIO
+from mercurial.i18n import _
+from mercurial.node import bin, hex, nullid
+from mercurial import hg, util, context, error
+
+from common import NoRepo, commit, converter_source, converter_sink
+
+class mercurial_sink(converter_sink):
+    """Convert sink writing converted history into a Mercurial repository."""
+    def __init__(self, ui, path):
+        converter_sink.__init__(self, ui, path)
+        self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
+        self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
+        self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
+        self.lastbranch = None
+        # Open an existing non-empty directory as a repository, otherwise
+        # create a fresh destination repository at 'path'.
+        if os.path.isdir(path) and len(os.listdir(path)) > 0:
+            try:
+                self.repo = hg.repository(self.ui, path)
+                if not self.repo.local():
+                    raise NoRepo(_('%s is not a local Mercurial repository')
+                                 % path)
+            except error.RepoError, err:
+                ui.traceback()
+                raise NoRepo(err.args[0])
+        else:
+            try:
+                ui.status(_('initializing destination %s repository\n') % path)
+                self.repo = hg.repository(self.ui, path, create=True)
+                if not self.repo.local():
+                    raise NoRepo(_('%s is not a local Mercurial repository')
+                                 % path)
+                self.created.append(path)
+            except error.RepoError:
+                ui.traceback()
+                raise NoRepo(_("could not create hg repository %s as sink")
+                             % path)
+        self.lock = None
+        self.wlock = None
+        self.filemapmode = False
+
+    def before(self):
+        # Hold the working-dir and store locks for the whole conversion.
+        self.ui.debug('run hg sink pre-conversion action\n')
+        self.wlock = self.repo.wlock()
+        self.lock = self.repo.lock()
+
+    def after(self):
+        self.ui.debug('run hg sink post-conversion action\n')
+        if self.lock:
+            self.lock.release()
+        if self.wlock:
+            self.wlock.release()
+
+    def revmapfile(self):
+        # File mapping source revision ids to converted hg changesets.
+        return os.path.join(self.path, ".hg", "shamap")
+
+    def authorfile(self):
+        return os.path.join(self.path, ".hg", "authormap")
+
+    def getheads(self):
+        h = self.repo.changelog.heads()
+        return [hex(x) for x in h]
+
+    def setbranch(self, branch, pbranches):
+        # With hg.clonebranches enabled, each source branch is converted
+        # into its own repository under self.path; switch to it here.
+        if not self.clonebranches:
+            return
+
+        setbranch = (branch != self.lastbranch)
+        self.lastbranch = branch
+        if not branch:
+            branch = 'default'
+        pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
+        pbranch = pbranches and pbranches[0][1] or 'default'
+
+        branchpath = os.path.join(self.path, branch)
+        if setbranch:
+            self.after()
+            try:
+                self.repo = hg.repository(self.ui, branchpath)
+            except:
+                self.repo = hg.repository(self.ui, branchpath, create=True)
+            self.before()
+
+        # pbranches may bring revisions from other branches (merge parents)
+        # Make sure we have them, or pull them.
+        missings = {}
+        for b in pbranches:
+            try:
+                self.repo.lookup(b[0])
+            except:
+                missings.setdefault(b[1], []).append(b[0])
+
+        if missings:
+            self.after()
+            for pbranch, heads in missings.iteritems():
+                pbranchpath = os.path.join(self.path, pbranch)
+                prepo = hg.repository(self.ui, pbranchpath)
+                self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
+                self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
+            self.before()
+
+    def _rewritetags(self, source, revmap, data):
+        # Rewrite a .hgtags blob, translating source revision ids into
+        # converted changeset ids; unmapped entries are dropped.
+        fp = cStringIO.StringIO()
+        for line in data.splitlines():
+            s = line.split(' ', 1)
+            if len(s) != 2:
+                continue
+            revid = revmap.get(source.lookuprev(s[0]))
+            if not revid:
+                continue
+            fp.write('%s %s\n' % (revid, s[1]))
+        return fp.getvalue()
+
+    def putcommit(self, files, copies, parents, commit, source, revmap):
+        # Commit one converted changeset; returns the new changeset hash.
+
+        files = dict(files)
+        def getfilectx(repo, memctx, f):
+            v = files[f]
+            data = source.getfile(f, v)
+            e = source.getmode(f, v)
+            if f == '.hgtags':
+                data = self._rewritetags(source, revmap, data)
+            return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
+
+        # Deduplicate parents while preserving their order.
+        pl = []
+        for p in parents:
+            if p not in pl:
+                pl.append(p)
+        parents = pl
+        nparents = len(parents)
+        if self.filemapmode and nparents == 1:
+            m1node = self.repo.changelog.read(bin(parents[0]))[0]
+            parent = parents[0]
+
+        # Pad with null ids so the loop below always has a (p1, p2) pair.
+        if len(parents) < 2:
+            parents.append(nullid)
+        if len(parents) < 2:
+            parents.append(nullid)
+        p2 = parents.pop(0)
+
+        text = commit.desc
+        extra = commit.extra.copy()
+        if self.branchnames and commit.branch:
+            extra['branch'] = commit.branch
+        if commit.rev:
+            extra['convert_revision'] = commit.rev
+
+        # Octopus merges are committed as a chain of two-parent merges.
+        while parents:
+            p1 = p2
+            p2 = parents.pop(0)
+            ctx = context.memctx(self.repo, (p1, p2), text, files.keys(),
+                                 getfilectx, commit.author, commit.date, extra)
+            self.repo.commitctx(ctx)
+            text = "(octopus merge fixup)\n"
+            p2 = hex(self.repo.changelog.tip())
+
+        if self.filemapmode and nparents == 1:
+            # In filemap mode, roll back revisions whose manifest is
+            # identical to the parent's (nothing survived the filter).
+            man = self.repo.manifest
+            mnode = self.repo.changelog.read(bin(p2))[0]
+            if not man.cmp(m1node, man.revision(mnode)):
+                self.ui.status(_("filtering out empty revision\n"))
+                self.repo.rollback()
+                return parent
+        return p2
+
+    def puttags(self, tags):
+        # Commit an updated .hgtags on the configured tags branch; no-op
+        # when the tag set is unchanged.
+        try:
+            parentctx = self.repo[self.tagsbranch]
+            tagparent = parentctx.node()
+        except error.RepoError:
+            parentctx = None
+            tagparent = nullid
+
+        try:
+            oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
+        except:
+            oldlines = []
+
+        newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
+        if newlines == oldlines:
+            return None, None
+        data = "".join(newlines)
+        def getfilectx(repo, memctx, f):
+            return context.memfilectx(f, data, False, False, None)
+
+        self.ui.status(_("updating tags\n"))
+        date = "%s 0" % int(time.mktime(time.gmtime()))
+        extra = {'branch': self.tagsbranch}
+        ctx = context.memctx(self.repo, (tagparent, None), "update tags",
+                             [".hgtags"], getfilectx, "convert-repo", date,
+                             extra)
+        self.repo.commitctx(ctx)
+        return hex(self.repo.changelog.tip()), hex(tagparent)
+
+    def setfilemapmode(self, active):
+        self.filemapmode = active
+
+class mercurial_source(converter_source):
+    """Convert source reading from a local Mercurial repository."""
+    def __init__(self, ui, path, rev=None):
+        converter_source.__init__(self, ui, path, rev)
+        self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
+        self.ignored = set()
+        self.saverev = ui.configbool('convert', 'hg.saverev', False)
+        try:
+            self.repo = hg.repository(self.ui, path)
+            # try to provoke an exception if this isn't really a hg
+            # repo, but some other bogus compatible-looking url
+            if not self.repo.local():
+                raise error.RepoError()
+        except error.RepoError:
+            ui.traceback()
+            raise NoRepo(_("%s is not a local Mercurial repository") % path)
+        self.lastrev = None
+        self.lastctx = None
+        self._changescache = None
+        self.convertfp = None
+        # Restrict converted revisions to startrev descendants
+        startnode = ui.config('convert', 'hg.startrev')
+        if startnode is not None:
+            try:
+                startnode = self.repo.lookup(startnode)
+            except error.RepoError:
+                raise util.Abort(_('%s is not a valid start revision')
+                                 % startnode)
+            startrev = self.repo.changelog.rev(startnode)
+            children = {startnode: 1}
+            for rev in self.repo.changelog.descendants(startrev):
+                children[self.repo.changelog.node(rev)] = 1
+            # keep(node) tests membership in the converted subset.
+            self.keep = children.__contains__
+        else:
+            self.keep = util.always
+
+    def changectx(self, rev):
+        # Cache the last changectx: callers tend to ask for the same
+        # revision repeatedly.
+        if self.lastrev != rev:
+            self.lastctx = self.repo[rev]
+            self.lastrev = rev
+        return self.lastctx
+
+    def parents(self, ctx):
+        # Filter out null parents and parents excluded by hg.startrev.
+        return [p for p in ctx.parents() if p and self.keep(p.node())]
+
+    def getheads(self):
+        if self.rev:
+            heads = [self.repo[self.rev].node()]
+        else:
+            heads = self.repo.heads()
+        return [hex(h) for h in heads if self.keep(h)]
+
+    def getfile(self, name, rev):
+        try:
+            return self.changectx(rev)[name].data()
+        except error.LookupError, err:
+            # Missing file: the convert protocol expects IOError here.
+            raise IOError(err)
+
+    def getmode(self, name, rev):
+        return self.changectx(rev).manifest().flags(name)
+
+    def getchanges(self, rev):
+        # Return (changed files, copies) for 'rev', honouring hg.startrev
+        # filtering and hg.ignoreerrors.
+        ctx = self.changectx(rev)
+        parents = self.parents(ctx)
+        if not parents:
+            files = sorted(ctx.manifest())
+            if self.ignoreerrors:
+                # calling getcopies() is a simple way to detect missing
+                # revlogs and populate self.ignored
+                self.getcopies(ctx, parents, files)
+            return [(f, rev) for f in files if f not in self.ignored], {}
+        if self._changescache and self._changescache[0] == rev:
+            m, a, r = self._changescache[1]
+        else:
+            m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
+        # getcopies() detects missing revlogs early, run it before
+        # filtering the changes.
+        copies = self.getcopies(ctx, parents, m + a)
+        changes = [(name, rev) for name in m + a + r
+                   if name not in self.ignored]
+        return sorted(changes), copies
+
+    def getcopies(self, ctx, parents, files):
+        # Map destination file -> copy source for rename/copy tracking.
+        copies = {}
+        for name in files:
+            if name in self.ignored:
+                continue
+            try:
+                copysource, copynode = ctx.filectx(name).renamed()
+                if copysource in self.ignored or not self.keep(copynode):
+                    continue
+                # Ignore copy sources not in parent revisions
+                found = False
+                for p in parents:
+                    if copysource in p:
+                        found = True
+                        break
+                if not found:
+                    continue
+                copies[name] = copysource
+            except TypeError:
+                # renamed() returned None (not a copy); unpacking raised.
+                pass
+            except error.LookupError, e:
+                if not self.ignoreerrors:
+                    raise
+                self.ignored.add(name)
+                self.ui.warn(_('ignoring: %s\n') % e)
+        return copies
+
+    def getcommit(self, rev):
+        ctx = self.changectx(rev)
+        parents = [p.hex() for p in self.parents(ctx)]
+        # hg.saverev records the source identity (changes the hash).
+        if self.saverev:
+            crev = rev
+        else:
+            crev = None
+        return commit(author=ctx.user(), date=util.datestr(ctx.date()),
+                      desc=ctx.description(), rev=crev, parents=parents,
+                      branch=ctx.branch(), extra=ctx.extra(),
+                      sortkey=ctx.rev())
+
+    def gettags(self):
+        # All repository tags except the implicit 'tip'.
+        tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
+        return dict([(name, hex(node)) for name, node in tags
+                     if self.keep(node)])
+
+    def getchangedfiles(self, rev, i):
+        # Files changed between 'rev' and its i-th parent; first-parent
+        # status is cached for a subsequent getchanges(rev) call.
+        ctx = self.changectx(rev)
+        parents = self.parents(ctx)
+        if not parents and i is None:
+            i = 0
+            changes = [], ctx.manifest().keys(), []
+        else:
+            i = i or 0
+            changes = self.repo.status(parents[i].node(), ctx.node())[:3]
+        changes = [[f for f in l if f not in self.ignored] for l in changes]
+
+        if i == 0:
+            self._changescache = (rev, changes)
+
+        return changes[0] + changes[1] + changes[2]
+
+    def converted(self, rev, destrev):
+        # Append the mapping to .hg/shamap so incremental conversions of
+        # this source can resume later.
+        if self.convertfp is None:
+            self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
+                                  'a')
+        self.convertfp.write('%s %s\n' % (destrev, rev))
+        self.convertfp.flush()
+
+    def before(self):
+        self.ui.debug('run hg source pre-conversion action\n')
+
+    def after(self):
+        self.ui.debug('run hg source post-conversion action\n')
+
+    def hasnativeorder(self):
+        # Mercurial revision numbers provide a usable native ordering.
+        return True
+
+    def lookuprev(self, rev):
+        # Resolve any revision identifier to a full hex node, or None.
+        try:
+            return hex(self.repo.lookup(rev))
+        except error.RepoError:
+            return None
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/monotone.py b/plugins/hg4idea/testData/bin/hgext/convert/monotone.py
new file mode 100644 (file)
index 0000000..e5ecd75
--- /dev/null
@@ -0,0 +1,229 @@
+# monotone.py - monotone support for the convert extension
+#
+#  Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
+#  others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, re
+from mercurial import util
+from common import NoRepo, commit, converter_source, checktool
+from common import commandline
+from mercurial.i18n import _
+
+class monotone_source(converter_source, commandline):
+    def __init__(self, ui, path=None, rev=None):
+        converter_source.__init__(self, ui, path, rev)
+        commandline.__init__(self, ui, 'mtn')
+
+        self.ui = ui
+        self.path = path
+
+        norepo = NoRepo(_("%s does not look like a monotone repository")
+                        % path)
+        if not os.path.exists(os.path.join(path, '_MTN')):
+            # Could be a monotone repository (SQLite db file)
+            try:
+                header = file(path, 'rb').read(16)
+            except:
+                header = ''
+            if header != 'SQLite format 3\x00':
+                raise norepo
+
+        # regular expressions for parsing monotone output
+        space    = r'\s*'
+        name     = r'\s+"((?:\\"|[^"])*)"\s*'
+        value    = name
+        revision = r'\s+\[(\w+)\]\s*'
+        lines    = r'(?:.|\n)+'
+
+        self.dir_re      = re.compile(space + "dir" + name)
+        self.file_re     = re.compile(space + "file" + name +
+                                      "content" + revision)
+        self.add_file_re = re.compile(space + "add_file" + name +
+                                      "content" + revision)
+        self.patch_re    = re.compile(space + "patch" + name +
+                                      "from" + revision + "to" + revision)
+        self.rename_re   = re.compile(space + "rename" + name + "to" + name)
+        self.delete_re   = re.compile(space + "delete" + name)
+        self.tag_re      = re.compile(space + "tag" + name + "revision" +
+                                      revision)
+        self.cert_re     = re.compile(lines + space + "name" + name +
+                                      "value" + value)
+
+        attr = space + "file" + lines + space + "attr" + space
+        self.attr_execute_re = re.compile(attr  + '"mtn:execute"' +
+                                          space + '"true"')
+
+        # cached data
+        self.manifest_rev = None
+        self.manifest = None
+        self.files = None
+        self.dirs  = None
+
+        checktool('mtn', abort=False)
+
+        # test if there are any revisions
+        self.rev = None
+        try:
+            self.getheads()
+        except:
+            raise norepo
+        self.rev = rev
+
+    def mtnrun(self, *args, **kwargs):
+        kwargs['d'] = self.path
+        return self.run0('automate', *args, **kwargs)
+
+    def mtnloadmanifest(self, rev):
+        if self.manifest_rev == rev:
+            return
+        self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
+        self.manifest_rev = rev
+        self.files = {}
+        self.dirs = {}
+
+        for e in self.manifest:
+            m = self.file_re.match(e)
+            if m:
+                attr = ""
+                name = m.group(1)
+                node = m.group(2)
+                if self.attr_execute_re.match(e):
+                    attr += "x"
+                self.files[name] = (node, attr)
+            m = self.dir_re.match(e)
+            if m:
+                self.dirs[m.group(1)] = True
+
+    def mtnisfile(self, name, rev):
+        # a non-file could be a directory or a deleted or renamed file
+        self.mtnloadmanifest(rev)
+        return name in self.files
+
+    def mtnisdir(self, name, rev):
+        self.mtnloadmanifest(rev)
+        return name in self.dirs
+
+    def mtngetcerts(self, rev):
+        certs = {"author":"<missing>", "date":"<missing>",
+            "changelog":"<missing>", "branch":"<missing>"}
+        certlist = self.mtnrun("certs", rev)
+        # mtn < 0.45:
+        #   key "test@selenic.com"
+        # mtn >= 0.45:
+        #   key [ff58a7ffb771907c4ff68995eada1c4da068d328]
+        certlist = re.split('\n\n      key ["\[]', certlist)
+        for e in certlist:
+            m = self.cert_re.match(e)
+            if m:
+                name, value = m.groups()
+                value = value.replace(r'\"', '"')
+                value = value.replace(r'\\', '\\')
+                certs[name] = value
+        # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
+        # and all times are stored in UTC
+        certs["date"] = certs["date"].split('.')[0] + " UTC"
+        return certs
+
+    # implement the converter_source interface:
+
+    def getheads(self):
+        if not self.rev:
+            return self.mtnrun("leaves").splitlines()
+        else:
+            return [self.rev]
+
+    def getchanges(self, rev):
+        #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
+        revision = self.mtnrun("get_revision", rev).split("\n\n")
+        files = {}
+        ignoremove = {}
+        renameddirs = []
+        copies = {}
+        for e in revision:
+            m = self.add_file_re.match(e)
+            if m:
+                files[m.group(1)] = rev
+                ignoremove[m.group(1)] = rev
+            m = self.patch_re.match(e)
+            if m:
+                files[m.group(1)] = rev
+            # Delete/rename is handled later when the convert engine
+            # discovers an IOError exception from getfile,
+            # but only if we add the "from" file to the list of changes.
+            m = self.delete_re.match(e)
+            if m:
+                files[m.group(1)] = rev
+            m = self.rename_re.match(e)
+            if m:
+                toname = m.group(2)
+                fromname = m.group(1)
+                if self.mtnisfile(toname, rev):
+                    ignoremove[toname] = 1
+                    copies[toname] = fromname
+                    files[toname] = rev
+                    files[fromname] = rev
+                elif self.mtnisdir(toname, rev):
+                    renameddirs.append((fromname, toname))
+
+        # Directory renames can be handled only once we have recorded
+        # all new files
+        for fromdir, todir in renameddirs:
+            renamed = {}
+            for tofile in self.files:
+                if tofile in ignoremove:
+                    continue
+                if tofile.startswith(todir + '/'):
+                    renamed[tofile] = fromdir + tofile[len(todir):]
+                    # Avoid chained moves like:
+                    # d1(/a) => d3/d1(/a)
+                    # d2 => d3
+                    ignoremove[tofile] = 1
+            for tofile, fromfile in renamed.items():
+                self.ui.debug (_("copying file in renamed directory "
+                                 "from '%s' to '%s'")
+                               % (fromfile, tofile), '\n')
+                files[tofile] = rev
+                copies[tofile] = fromfile
+            for fromfile in renamed.values():
+                files[fromfile] = rev
+
+        return (files.items(), copies)
+
+    def getmode(self, name, rev):
+        self.mtnloadmanifest(rev)
+        node, attr = self.files.get(name, (None, ""))
+        return attr
+
+    def getfile(self, name, rev):
+        if not self.mtnisfile(name, rev):
+            raise IOError() # file was deleted or renamed
+        try:
+            return self.mtnrun("get_file_of", name, r=rev)
+        except:
+            raise IOError() # file was deleted or renamed
+
+    def getcommit(self, rev):
+        certs   = self.mtngetcerts(rev)
+        return commit(
+            author=certs["author"],
+            date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
+            desc=certs["changelog"],
+            rev=rev,
+            parents=self.mtnrun("parents", rev).splitlines(),
+            branch=certs["branch"])
+
+    def gettags(self):
+        tags = {}
+        for e in self.mtnrun("tags").split("\n\n"):
+            m = self.tag_re.match(e)
+            if m:
+                tags[m.group(1)] = m.group(2)
+        return tags
+
+    def getchangedfiles(self, rev, i):
+        # This function is only needed to support --filemap
+        # ... and we don't support that
+        raise NotImplementedError()
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/p4.py b/plugins/hg4idea/testData/bin/hgext/convert/p4.py
new file mode 100644 (file)
index 0000000..64967f2
--- /dev/null
@@ -0,0 +1,208 @@
+# Perforce source for convert extension.
+#
+# Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial import util
+from mercurial.i18n import _
+
+from common import commit, converter_source, checktool, NoRepo
+import marshal
+import re
+
+def loaditer(f):
+    "Yield the dictionary objects generated by p4"
+    try:
+        while True:
+            d = marshal.load(f)
+            if not d:
+                break
+            yield d
+    except EOFError:
+        pass
+
+class p4_source(converter_source):
+    def __init__(self, ui, path, rev=None):
+        super(p4_source, self).__init__(ui, path, rev=rev)
+
+        if "/" in path and not path.startswith('//'):
+            raise NoRepo(_('%s does not look like a P4 repository') % path)
+
+        checktool('p4', abort=False)
+
+        self.p4changes = {}
+        self.heads = {}
+        self.changeset = {}
+        self.files = {}
+        self.tags = {}
+        self.lastbranch = {}
+        self.parent = {}
+        self.encoding = "latin_1"
+        self.depotname = {}           # mapping from local name to depot name
+        self.modecache = {}
+        self.re_type = re.compile(
+            "([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
+            "(\+\w+)?$")
+        self.re_keywords = re.compile(
+            r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
+            r":[^$\n]*\$")
+        self.re_keywords_old = re.compile("\$(Id|Header):[^$\n]*\$")
+
+        self._parse(ui, path)
+
+    def _parse_view(self, path):
+        "Read changes affecting the path"
+        cmd = 'p4 -G changes -s submitted "%s"' % path
+        stdout = util.popen(cmd, mode='rb')
+        for d in loaditer(stdout):
+            c = d.get("change", None)
+            if c:
+                self.p4changes[c] = True
+
+    def _parse(self, ui, path):
+        "Prepare list of P4 filenames and revisions to import"
+        ui.status(_('reading p4 views\n'))
+
+        # read client spec or view
+        if "/" in path:
+            self._parse_view(path)
+            if path.startswith("//") and path.endswith("/..."):
+                views = {path[:-3]:""}
+            else:
+                views = {"//": ""}
+        else:
+            cmd = 'p4 -G client -o "%s"' % path
+            clientspec = marshal.load(util.popen(cmd, mode='rb'))
+
+            views = {}
+            for client in clientspec:
+                if client.startswith("View"):
+                    sview, cview = clientspec[client].split()
+                    self._parse_view(sview)
+                    if sview.endswith("...") and cview.endswith("..."):
+                        sview = sview[:-3]
+                        cview = cview[:-3]
+                    cview = cview[2:]
+                    cview = cview[cview.find("/") + 1:]
+                    views[sview] = cview
+
+        # list of changes that affect our source files
+        self.p4changes = self.p4changes.keys()
+        self.p4changes.sort(key=int)
+
+        # list with depot pathnames, longest first
+        vieworder = views.keys()
+        vieworder.sort(key=len, reverse=True)
+
+        # handle revision limiting
+        startrev = self.ui.config('convert', 'p4.startrev', default=0)
+        self.p4changes = [x for x in self.p4changes
+                          if ((not startrev or int(x) >= int(startrev)) and
+                              (not self.rev or int(x) <= int(self.rev)))]
+
+        # now read the full changelists to get the list of file revisions
+        ui.status(_('collecting p4 changelists\n'))
+        lastid = None
+        for change in self.p4changes:
+            cmd = "p4 -G describe %s" % change
+            stdout = util.popen(cmd, mode='rb')
+            d = marshal.load(stdout)
+
+            desc = self.recode(d["desc"])
+            shortdesc = desc.split("\n", 1)[0]
+            t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
+            ui.status(util.ellipsis(t, 80) + '\n')
+
+            if lastid:
+                parents = [lastid]
+            else:
+                parents = []
+
+            date = (int(d["time"]), 0)     # timezone not set
+            c = commit(author=self.recode(d["user"]), date=util.datestr(date),
+                       parents=parents, desc=desc, branch='',
+                       extra={"p4": change})
+
+            files = []
+            i = 0
+            while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
+                oldname = d["depotFile%d" % i]
+                filename = None
+                for v in vieworder:
+                    if oldname.startswith(v):
+                        filename = views[v] + oldname[len(v):]
+                        break
+                if filename:
+                    files.append((filename, d["rev%d" % i]))
+                    self.depotname[filename] = oldname
+                i += 1
+            self.changeset[change] = c
+            self.files[change] = files
+            lastid = change
+
+        if lastid:
+            self.heads = [lastid]
+
+    def getheads(self):
+        return self.heads
+
+    def getfile(self, name, rev):
+        cmd = 'p4 -G print "%s#%s"' % (self.depotname[name], rev)
+        stdout = util.popen(cmd, mode='rb')
+
+        mode = None
+        contents = ""
+        keywords = None
+
+        for d in loaditer(stdout):
+            code = d["code"]
+            data = d.get("data")
+
+            if code == "error":
+                raise IOError(d["generic"], data)
+
+            elif code == "stat":
+                p4type = self.re_type.match(d["type"])
+                if p4type:
+                    mode = ""
+                    flags = (p4type.group(1) or "") + (p4type.group(3) or "")
+                    if "x" in flags:
+                        mode = "x"
+                    if p4type.group(2) == "symlink":
+                        mode = "l"
+                    if "ko" in flags:
+                        keywords = self.re_keywords_old
+                    elif "k" in flags:
+                        keywords = self.re_keywords
+
+            elif code == "text" or code == "binary":
+                contents += data
+
+        if mode is None:
+            raise IOError(0, "bad stat")
+
+        self.modecache[(name, rev)] = mode
+
+        if keywords:
+            contents = keywords.sub("$\\1$", contents)
+        if mode == "l" and contents.endswith("\n"):
+            contents = contents[:-1]
+
+        return contents
+
+    def getmode(self, name, rev):
+        return self.modecache[(name, rev)]
+
+    def getchanges(self, rev):
+        return self.files[rev], {}
+
+    def getcommit(self, rev):
+        return self.changeset[rev]
+
+    def gettags(self):
+        return self.tags
+
+    def getchangedfiles(self, rev, i):
+        return sorted([x[0] for x in self.files[rev]])
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/subversion.py b/plugins/hg4idea/testData/bin/hgext/convert/subversion.py
new file mode 100644 (file)
index 0000000..d2ac80c
--- /dev/null
@@ -0,0 +1,1178 @@
+# Subversion 1.4/1.5 Python API backend
+#
+# Copyright(C) 2007 Daniel Holth et al
+
+import os
+import re
+import sys
+import cPickle as pickle
+import tempfile
+import urllib
+import urllib2
+
+from mercurial import strutil, util, encoding
+from mercurial.i18n import _
+
+# Subversion stuff. Works best with very recent Python SVN bindings
+# e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
+# these bindings.
+
+from cStringIO import StringIO
+
+from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
+from common import commandline, converter_source, converter_sink, mapfile
+
+try:
+    from svn.core import SubversionException, Pool
+    import svn
+    import svn.client
+    import svn.core
+    import svn.ra
+    import svn.delta
+    import transport
+    import warnings
+    warnings.filterwarnings('ignore',
+            module='svn.core',
+            category=DeprecationWarning)
+
+except ImportError:
+    pass
+
+class SvnPathNotFound(Exception):
+    pass
+
+def geturl(path):
+    try:
+        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
+    except SubversionException:
+        pass
+    if os.path.isdir(path):
+        path = os.path.normpath(os.path.abspath(path))
+        if os.name == 'nt':
+            path = '/' + util.normpath(path)
+        # Module URL is later compared with the repository URL returned
+        # by svn API, which is UTF-8.
+        path = encoding.tolocal(path)
+        return 'file://%s' % urllib.quote(path)
+    return path
+
+def optrev(number):
+    optrev = svn.core.svn_opt_revision_t()
+    optrev.kind = svn.core.svn_opt_revision_number
+    optrev.value.number = number
+    return optrev
+
+class changedpath(object):
+    def __init__(self, p):
+        self.copyfrom_path = p.copyfrom_path
+        self.copyfrom_rev = p.copyfrom_rev
+        self.action = p.action
+
+def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
+                    strict_node_history=False):
+    protocol = -1
+    def receiver(orig_paths, revnum, author, date, message, pool):
+        if orig_paths is not None:
+            for k, v in orig_paths.iteritems():
+                orig_paths[k] = changedpath(v)
+        pickle.dump((orig_paths, revnum, author, date, message),
+                    fp, protocol)
+
+    try:
+        # Use an ra of our own so that our parent can consume
+        # our results without confusing the server.
+        t = transport.SvnRaTransport(url=url)
+        svn.ra.get_log(t.ra, paths, start, end, limit,
+                       discover_changed_paths,
+                       strict_node_history,
+                       receiver)
+    except SubversionException, (inst, num):
+        pickle.dump(num, fp, protocol)
+    except IOError:
+        # Caller may interrupt the iteration
+        pickle.dump(None, fp, protocol)
+    else:
+        pickle.dump(None, fp, protocol)
+    fp.close()
+    # With large history, cleanup process goes crazy and suddenly
+    # consumes *huge* amount of memory. The output file being closed,
+    # there is no need for clean termination.
+    os._exit(0)
+
+def debugsvnlog(ui, **opts):
+    """Fetch SVN log in a subprocess and channel them back to parent to
+    avoid memory collection issues.
+    """
+    util.set_binary(sys.stdin)
+    util.set_binary(sys.stdout)
+    args = decodeargs(sys.stdin.read())
+    get_log_child(sys.stdout, *args)
+
+class logstream(object):
+    """Interruptible revision log iterator."""
+    def __init__(self, stdout):
+        self._stdout = stdout
+
+    def __iter__(self):
+        while True:
+            try:
+                entry = pickle.load(self._stdout)
+            except EOFError:
+                raise util.Abort(_('Mercurial failed to run itself, check'
+                                   ' hg executable is in PATH'))
+            try:
+                orig_paths, revnum, author, date, message = entry
+            except:
+                if entry is None:
+                    break
+                raise SubversionException("child raised exception", entry)
+            yield entry
+
+    def close(self):
+        if self._stdout:
+            self._stdout.close()
+            self._stdout = None
+
+
+# Check to see if the given path is a local Subversion repo. Verify this by
+# looking for several svn-specific files and directories in the given
+# directory.
+def filecheck(ui, path, proto):
+    for x in ('locks', 'hooks', 'format', 'db'):
+        if not os.path.exists(os.path.join(path, x)):
+            return False
+    return True
+
+# Check to see if a given path is the root of an svn repo over http. We verify
+# this by requesting a version-controlled URL we know can't exist and looking
+# for the svn-specific "not found" XML.
+def httpcheck(ui, path, proto):
+    try:
+        opener = urllib2.build_opener()
+        rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
+        data = rsp.read()
+    except urllib2.HTTPError, inst:
+        if inst.code != 404:
+            # Except for 404 we cannot know for sure this is not an svn repo
+            ui.warn(_('svn: cannot probe remote repository, assume it could '
+                      'be a subversion repository. Use --source-type if you '
+                      'know better.\n'))
+            return True
+        data = inst.fp.read()
+    except:
+        # Could be urllib2.URLError if the URL is invalid or anything else.
+        return False
+    return '<m:human-readable errcode="160013">' in data
+
+protomap = {'http': httpcheck,
+            'https': httpcheck,
+            'file': filecheck,
+            }
+def issvnurl(ui, url):
+    try:
+        proto, path = url.split('://', 1)
+        if proto == 'file':
+            path = urllib.url2pathname(path)
+    except ValueError:
+        proto = 'file'
+        path = os.path.abspath(url)
+    if proto == 'file':
+        path = path.replace(os.sep, '/')
+    check = protomap.get(proto, lambda *args: False)
+    while '/' in path:
+        if check(ui, path, proto):
+            return True
+        path = path.rsplit('/', 1)[0]
+    return False
+
+# SVN conversion code stolen from bzr-svn and tailor
+#
+# Subversion looks like a versioned filesystem, branches structures
+# are defined by conventions and not enforced by the tool. First,
+# we define the potential branches (modules) as "trunk" and "branches"
+# children directories. Revisions are then identified by their
+# module and revision number (and a repository identifier).
+#
+# The revision graph is really a tree (or a forest). By default, a
+# revision parent is the previous revision in the same module. If the
+# module directory is copied/moved from another module then the
+# revision is the module root and its parent the source revision in
+# the parent module. A revision has at most one parent.
+#
+class svn_source(converter_source):
+    def __init__(self, ui, url, rev=None):
+        super(svn_source, self).__init__(ui, url, rev=rev)
+
+        if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
+                (os.path.exists(url) and
+                 os.path.exists(os.path.join(url, '.svn'))) or
+                issvnurl(ui, url)):
+            raise NoRepo(_("%s does not look like a Subversion repository")
+                         % url)
+
+        try:
+            SubversionException
+        except NameError:
+            raise MissingTool(_('Subversion python bindings could not be loaded'))
+
+        try:
+            version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
+            if version < (1, 4):
+                raise MissingTool(_('Subversion python bindings %d.%d found, '
+                                    '1.4 or later required') % version)
+        except AttributeError:
+            raise MissingTool(_('Subversion python bindings are too old, 1.4 '
+                                'or later required'))
+
+        self.lastrevs = {}
+
+        latest = None
+        try:
+            # Support file://path@rev syntax. Useful e.g. to convert
+            # deleted branches.
+            at = url.rfind('@')
+            if at >= 0:
+                latest = int(url[at + 1:])
+                url = url[:at]
+        except ValueError:
+            pass
+        self.url = geturl(url)
+        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
+        try:
+            self.transport = transport.SvnRaTransport(url=self.url)
+            self.ra = self.transport.ra
+            self.ctx = self.transport.client
+            self.baseurl = svn.ra.get_repos_root(self.ra)
+            # Module is either empty or a repository path starting with
+            # a slash and not ending with a slash.
+            self.module = urllib.unquote(self.url[len(self.baseurl):])
+            self.prevmodule = None
+            self.rootmodule = self.module
+            self.commits = {}
+            self.paths = {}
+            self.uuid = svn.ra.get_uuid(self.ra)
+        except SubversionException:
+            ui.traceback()
+            raise NoRepo(_("%s does not look like a Subversion repository")
+                         % self.url)
+
+        if rev:
+            try:
+                latest = int(rev)
+            except ValueError:
+                raise util.Abort(_('svn: revision %s is not an integer') % rev)
+
+        self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
+        try:
+            self.startrev = int(self.startrev)
+            if self.startrev < 0:
+                self.startrev = 0
+        except ValueError:
+            raise util.Abort(_('svn: start revision %s is not an integer')
+                             % self.startrev)
+
+        self.head = self.latest(self.module, latest)
+        if not self.head:
+            raise util.Abort(_('no revision found in module %s')
+                             % self.module)
+        self.last_changed = self.revnum(self.head)
+
+        self._changescache = None
+
+        if os.path.exists(os.path.join(url, '.svn/entries')):
+            self.wc = url
+        else:
+            self.wc = None
+        self.convertfp = None
+
+    def setrevmap(self, revmap):
+        lastrevs = {}
+        for revid in revmap.iterkeys():
+            uuid, module, revnum = self.revsplit(revid)
+            lastrevnum = lastrevs.setdefault(module, revnum)
+            if revnum > lastrevnum:
+                lastrevs[module] = revnum
+        self.lastrevs = lastrevs
+
+    def exists(self, path, optrev):
+        try:
+            svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
+                                 optrev, False, self.ctx)
+            return True
+        except SubversionException:
+            return False
+
+    def getheads(self):
+
+        def isdir(path, revnum):
+            kind = self._checkpath(path, revnum)
+            return kind == svn.core.svn_node_dir
+
+        def getcfgpath(name, rev):
+            cfgpath = self.ui.config('convert', 'svn.' + name)
+            if cfgpath is not None and cfgpath.strip() == '':
+                return None
+            path = (cfgpath or name).strip('/')
+            if not self.exists(path, rev):
+                if cfgpath:
+                    raise util.Abort(_('expected %s to be at %r, but not found')
+                                 % (name, path))
+                return None
+            self.ui.note(_('found %s at %r\n') % (name, path))
+            return path
+
+        rev = optrev(self.last_changed)
+        oldmodule = ''
+        trunk = getcfgpath('trunk', rev)
+        self.tags = getcfgpath('tags', rev)
+        branches = getcfgpath('branches', rev)
+
+        # If the project has a trunk or branches, we will extract heads
+        # from them. We keep the project root otherwise.
+        if trunk:
+            oldmodule = self.module or ''
+            self.module += '/' + trunk
+            self.head = self.latest(self.module, self.last_changed)
+            if not self.head:
+                raise util.Abort(_('no revision found in module %s')
+                                 % self.module)
+
+        # First head in the list is the module's head
+        self.heads = [self.head]
+        if self.tags is not None:
+            self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
+
+        # Check if branches bring a few more heads to the list
+        if branches:
+            rpath = self.url.strip('/')
+            branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
+                                        rev, False, self.ctx)
+            for branch in branchnames.keys():
+                module = '%s/%s/%s' % (oldmodule, branches, branch)
+                if not isdir(module, self.last_changed):
+                    continue
+                brevid = self.latest(module, self.last_changed)
+                if not brevid:
+                    self.ui.note(_('ignoring empty branch %s\n') % branch)
+                    continue
+                self.ui.note(_('found branch %s at %d\n') %
+                             (branch, self.revnum(brevid)))
+                self.heads.append(brevid)
+
+        if self.startrev and self.heads:
+            if len(self.heads) > 1:
+                raise util.Abort(_('svn: start revision is not supported '
+                                   'with more than one branch'))
+            revnum = self.revnum(self.heads[0])
+            if revnum < self.startrev:
+                raise util.Abort(
+                    _('svn: no revision found after start revision %d')
+                                 % self.startrev)
+
+        return self.heads
+
+    def getfile(self, file, rev):
+        data, mode = self._getfile(file, rev)
+        self.modecache[(file, rev)] = mode
+        return data
+
+    def getmode(self, file, rev):
+        return self.modecache[(file, rev)]
+
+    def getchanges(self, rev):
+        if self._changescache and self._changescache[0] == rev:
+            return self._changescache[1]
+        self._changescache = None
+        self.modecache = {}
+        (paths, parents) = self.paths[rev]
+        if parents:
+            files, self.removed, copies = self.expandpaths(rev, paths, parents)
+        else:
+            # Perform a full checkout on roots
+            uuid, module, revnum = self.revsplit(rev)
+            entries = svn.client.ls(self.baseurl + urllib.quote(module),
+                                    optrev(revnum), True, self.ctx)
+            files = [n for n, e in entries.iteritems()
+                     if e.kind == svn.core.svn_node_file]
+            copies = {}
+            self.removed = set()
+
+        files.sort()
+        files = zip(files, [rev] * len(files))
+
+        # caller caches the result, so free it here to release memory
+        del self.paths[rev]
+        return (files, copies)
+
+    def getchangedfiles(self, rev, i):
+        changes = self.getchanges(rev)
+        self._changescache = (rev, changes)
+        return [f[0] for f in changes[0]]
+
    def getcommit(self, rev):
        """Return the commit object for 'rev', fetching and parsing svn
        log entries on demand.  Each commit is handed out once: the
        caller caches it, so the local entry is dropped afterwards.
        """
        if rev not in self.commits:
            uuid, module, revnum = self.revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            # revision graph backward traversal. Cache all of them
            # down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            # isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
        commit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return commit
+
    def gettags(self):
        """Return a {tagname: revid} map built by walking the tags
        directory history backwards, or an empty map when no tags
        directory is tracked (self.tags is None).
        """
        tags = {}
        if self.tags is None:
            return tags

        # svn tags are just a convention, project branches left in a
        # 'tags' directory. There is no other relationship than
        # ancestry, which is expensive to discover and makes them hard
        # to update incrementally.  Worse, past revisions may be
        # referenced by tags far away in the future, requiring a deep
        # history traversal on every calculation.  Current code
        # performs a single backward traversal, tracking moves within
        # the tags directory (tag renaming) and recording a new tag
        # everytime a project is copied from outside the tags
        # directory. It also lists deleted tags, this behaviour may
        # change in the future.
        pendings = []
        tagspath = self.tags
        start = svn.ra.get_latest_revnum(self.ra)
        try:
            for entry in self._getlog([self.tags], start, self.startrev):
                origpaths, revnum, author, date, message = entry
                copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
                          in origpaths.iteritems() if e.copyfrom_path]
                # Apply moves/copies from more specific to general
                copies.sort(reverse=True)

                srctagspath = tagspath
                if copies and copies[-1][2] == tagspath:
                    # Track tags directory moves
                    srctagspath = copies.pop()[0]

                for source, sourcerev, dest in copies:
                    if not dest.startswith(tagspath + '/'):
                        continue
                    # Walking backwards: a copy onto a pending tag path
                    # retargets that tag at its older source location.
                    for tag in pendings:
                        if tag[0].startswith(dest):
                            tagpath = source + tag[0][len(dest):]
                            tag[:2] = [tagpath, sourcerev]
                            break
                    else:
                        pendings.append([source, sourcerev, dest])

                # Filter out tags with children coming from different
                # parts of the repository like:
                # /tags/tag.1 (from /trunk:10)
                # /tags/tag.1/foo (from /branches/foo:12)
                # Here/tags/tag.1 discarded as well as its children.
                # It happens with tools like cvs2svn. Such tags cannot
                # be represented in mercurial.
                addeds = dict((p, e.copyfrom_path) for p, e
                              in origpaths.iteritems()
                              if e.action == 'A' and e.copyfrom_path)
                badroots = set()
                for destroot in addeds:
                    for source, sourcerev, dest in pendings:
                        if (not dest.startswith(destroot + '/')
                            or source.startswith(addeds[destroot] + '/')):
                            continue
                        badroots.add(destroot)
                        break

                for badroot in badroots:
                    pendings = [p for p in pendings if p[2] != badroot
                                and not p[2].startswith(badroot + '/')]

                # Tell tag renamings from tag creations
                remainings = []
                for source, sourcerev, dest in pendings:
                    tagname = dest.split('/')[-1]
                    if source.startswith(srctagspath):
                        # Moved within the tags directory: still pending.
                        remainings.append([source, sourcerev, tagname])
                        continue
                    if tagname in tags:
                        # Keep the latest tag value
                        continue
                    # From revision may be fake, get one with changes
                    try:
                        tagid = self.latest(source, sourcerev)
                        if tagid and tagname not in tags:
                            tags[tagname] = tagid
                    except SvnPathNotFound:
                        # It happens when we are following directories
                        # we assumed were copied with their parents
                        # but were really created in the tag
                        # directory.
                        pass
                pendings = remainings
                tagspath = srctagspath

        except SubversionException:
            self.ui.note(_('no tags found at revision %d\n') % start)
        return tags
+
    def converted(self, rev, destrev):
        """Record that source revision 'rev' became 'destrev'.

        Only meaningful for working-copy sources: the mapping is
        appended to <wc>/.svn/hg-shamap, the file opened lazily on the
        first call and kept open for subsequent ones.
        """
        if not self.wc:
            return
        if self.convertfp is None:
            self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
                                  'a')
        self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
        self.convertfp.flush()
+
+    def revid(self, revnum, module=None):
+        return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
+
+    def revnum(self, rev):
+        return int(rev.split('@')[-1])
+
+    def revsplit(self, rev):
+        url, revnum = rev.rsplit('@', 1)
+        revnum = int(revnum)
+        parts = url.split('/', 1)
+        uuid = parts.pop(0)[4:]
+        mod = ''
+        if parts:
+            mod = '/' + parts[0]
+        return uuid, mod, revnum
+
    def latest(self, path, stop=0):
        """Find the latest revid affecting path, up to stop. It may return
        a revision in a different module, since a branch may be moved without
        a change being reported. Return None if computed module does not
        belong to rootmodule subtree.

        Raises SvnPathNotFound when the path does not exist at or
        before 'stop'.
        """
        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None

        if not stop:
            # Default to the repository head.
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            # stat() wants a path relative to the repository root.
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d')
                                  % (path, stop))

        # stat() gives us the previous revision on this line of
        # development, but it might be in *another module*. Fetch the
        # log and detect renames down to the latest revision.
        stream = self._getlog([path], stop, dirent.created_rev)
        try:
            for entry in stream:
                paths, revnum, author, date, message = entry
                if revnum <= dirent.created_rev:
                    break

                for p in paths:
                    if not path.startswith(p) or not paths[p].copyfrom_path:
                        continue
                    # Follow the rename backwards to its source path.
                    newpath = paths[p].copyfrom_path + path[len(p):]
                    self.ui.debug("branch renamed from %s to %s at %d\n" %
                                  (path, newpath, revnum))
                    path = newpath
                    break
        finally:
            stream.close()

        if not path.startswith(self.rootmodule):
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None
        return self.revid(dirent.created_rev, path)
+
    def reparent(self, module):
        """Reparent the svn transport and return the previous parent."""
        if self.prevmodule == module:
            # Already parented there, avoid a round-trip.
            return module
        svnurl = self.baseurl + urllib.quote(module)
        prevmodule = self.prevmodule
        if prevmodule is None:
            # First call: report the repository root ('') as previous.
            prevmodule = ''
        self.ui.debug("reparent to %s\n" % svnurl)
        svn.ra.reparent(self.ra, svnurl)
        self.prevmodule = module
        return prevmodule
+
    def expandpaths(self, rev, paths, parents):
        """Expand the changed-path entries of 'rev' into concrete file
        changes relative to the first parent revision.

        Returns (changed, removed, copies): 'changed' is the list of
        touched files (deleted ones included), 'removed' the set of
        deleted files, 'copies' a {dest: source} map.  Directory entries
        are expanded into their file children.
        """
        changed, removed = set(), set()
        # Map of entrypath, revision for finding source of deleted
        # revisions.
        copyfrom = {}
        copies = {}

        new_module, revnum = self.revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for path, ent in paths:
            entrypath = self.getrelpath(path)

            kind = self._checkpath(entrypath, revnum)
            if kind == svn.core.svn_node_file:
                changed.add(self.recode(entrypath))
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = self.revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrom_path:
                    continue
                self.ui.debug("copied to %s from %s@%s\n" %
                              (entrypath, copyfrom_path, ent.copyfrom_rev))
                copies[self.recode(entrypath)] = self.recode(copyfrom_path)
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
                pmodule, prevnum = self.revsplit(parents[0])[1:]
                parentpath = pmodule + "/" + entrypath
                fromkind = self._checkpath(entrypath, prevnum, pmodule)

                if fromkind == svn.core.svn_node_file:
                    removed.add(self.recode(entrypath))
                elif fromkind == svn.core.svn_node_dir:
                    # Deleted directory: mark every child file removed.
                    oroot = parentpath.strip('/')
                    nroot = path.strip('/')
                    children = self._find_children(oroot, prevnum)
                    children = [s.replace(oroot, nroot) for s in children]
                    for child in children:
                        childpath = self.getrelpath("/" + child, pmodule)
                        if childpath:
                            removed.add(self.recode(childpath))
                else:
                    self.ui.debug('unknown path in revision %d: %s\n' % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                if ent.action == 'M':
                    # If the directory just had a prop change,
                    # then we shouldn't need to look for its children.
                    continue
                elif ent.action == 'R' and parents:
                    # If a directory is replacing a file, mark the previous
                    # file as deleted
                    pmodule, prevnum = self.revsplit(parents[0])[1:]
                    pkind = self._checkpath(entrypath, prevnum, pmodule)
                    if pkind == svn.core.svn_node_file:
                        removed.add(self.recode(entrypath))

                children = sorted(self._find_children(path, revnum))
                for child in children:
                    # Can we move a child directory and its
                    # parent in the same commit? (probably can). Could
                    # cause problems if instead of revnum -1,
                    # we have to look in (copyfrom_path, revnum - 1)
                    entrypath = self.getrelpath("/" + child)
                    if entrypath:
                        # Need to filter out directories here...
                        kind = self._checkpath(entrypath, revnum)
                        if kind != svn.core.svn_node_dir:
                            changed.add(self.recode(entrypath))

                # Handle directory copies
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = self.revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrompath:
                    continue
                copyfrom[path] = ent
                self.ui.debug("mark %s came from %s:%d\n"
                              % (path, copyfrompath, ent.copyfrom_rev))
                children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
                children.sort()
                for child in children:
                    entrypath = self.getrelpath("/" + child, pmodule)
                    if not entrypath:
                        continue
                    copytopath = path + entrypath[len(copyfrompath):]
                    copytopath = self.getrelpath(copytopath)
                    copies[self.recode(copytopath)] = self.recode(entrypath)

        changed.update(removed)
        return (list(changed), removed, copies)
+
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Fetch the svn log for self.module between from_revnum and
        to_revnum (order-insensitive) and fill self.commits/self.paths
        with the parsed entries.

        Aborts when the branch has no such revision.
        """
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        # Most recently parsed cset; its parents list is patched once the
        # next (older) revision is parsed, see parselogentry below.
        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug("parsing revision %d (%d changes)\n" %
                          (revnum, len(orig_paths)))

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(orig_paths.iteritems())
            root_paths = [(p, e) for p, e in orig_paths
                          if self.module.startswith(p)]
            if root_paths:
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path):]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = self.revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _('found parent of branch %s at %d: %s\n') %
                                (self.module, prevnum, prevmodule))
                else:
                    self.ui.debug("no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])

            log = message and self.recode(message) or ''
            author = author and self.recode(author) or ''
            try:
                branch = self.module.split("/")[-1]
                if branch == 'trunk':
                    branch = ''
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev)

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        lastonbranch = True
                        break
                    if not paths:
                        self.ui.debug('revision %d has no entries\n' % revnum)
                        # If we ever leave the loop on an empty
                        # revision, do not try to get a parent branch
                        lastonbranch = lastonbranch or revnum == 0
                        continue
                    cset, lastonbranch = parselogentry(paths, revnum, author,
                                                       date, message)
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    pass
        except SubversionException, (inst, num):
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
            raise
+
    def _getfile(self, file, rev):
        """Return (data, mode) for 'file' at 'rev'.

        'mode' is 'x' for executables, 'l' for symlinks, '' otherwise.
        Raises IOError for deleted or missing files, which is the
        signal the convert machinery expects for removals.
        """
        # TODO: ra.get_file transmits the whole file instead of diffs.
        if file in self.removed:
            raise IOError()
        mode = ''
        try:
            new_module, revnum = self.revsplit(rev)[1:]
            if self.module != new_module:
                self.module = new_module
                self.reparent(self.module)
            io = StringIO()
            info = svn.ra.get_file(self.ra, file, revnum, io)
            data = io.getvalue()
            # ra.get_files() seems to keep a reference on the input buffer
            # preventing collection. Release it explicitely.
            io.close()
            if isinstance(info, list):
                info = info[-1]
            # Derive the mode from the svn:executable/svn:special props.
            mode = ("svn:executable" in info) and 'x' or ''
            mode = ("svn:special" in info) and 'l' or mode
        except SubversionException, e:
            notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
                svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
            if e.apr_err in notfound: # File not found
                raise IOError()
            raise
        if mode == 'l':
            # Symlink targets are stored with a "link " prefix, strip it.
            link_prefix = "link "
            if data.startswith(link_prefix):
                data = data[len(link_prefix):]
        return data, mode
+
    def _find_children(self, path, revnum):
        """Recursively list the entries under 'path' at 'revnum',
        returned as repository-relative paths prefixed with 'path'."""
        path = path.strip('/')
        pool = Pool()
        rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
        return ['%s/%s' % (path, x) for x in
                svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
+
+    def getrelpath(self, path, module=None):
+        if module is None:
+            module = self.module
+        # Given the repository url of this wc, say
+        #   "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
+        # extract the "entry" portion (a relative path) from what
+        # svn log --xml says, ie
+        #   "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
+        # that is to say "tests/PloneTestCase.py"
+        if path.startswith(module):
+            relative = path.rstrip('/')[len(module):]
+            if relative.startswith('/'):
+                return relative[1:]
+            elif relative == '':
+                return relative
+
+        # The path is outside our tracked tree...
+        self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
+        return None
+
    def _checkpath(self, path, revnum, module=None):
        """Return the svn node kind of 'path' at 'revnum' (file, dir or
        0 when absent).

        When 'module' is given, the path is checked relative to it with
        the transport temporarily reparented to the repository root.
        """
        if module is not None:
            prevmodule = self.reparent('')
            path = module + '/' + path
        try:
            # ra.check_path does not like leading slashes very much, it leads
            # to PROPFIND subversion errors
            return svn.ra.check_path(self.ra, path.strip('/'), revnum)
        finally:
            if module is not None:
                self.reparent(prevmodule)
+    
    def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
                strict_node_history=False):
        """Return a stream of svn log entries for 'paths' between
        revisions 'start' and 'end'.

        The log is fetched by spawning a child 'hg debugsvnlog' process
        and reading the decoded entries back from its stdout
        (NOTE(review): presumably to isolate the svn bindings from this
        process -- confirm against the debugsvnlog command).
        """
        # Normalize path names, svn >= 1.5 only wants paths relative to
        # supplied URL
        relpaths = []
        for p in paths:
            if not p.startswith('/'):
                p = self.module + '/' + p
            relpaths.append(p.strip('/'))
        args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
                strict_node_history]
        arg = encodeargs(args)
        hgexe = util.hgexecutable()
        cmd = '%s debugsvnlog' % util.shellquote(hgexe)
        stdin, stdout = util.popen2(cmd)
        stdin.write(arg)
        try:
            stdin.close()
        except IOError:
            raise util.Abort(_('Mercurial failed to run itself, check'
                               ' hg executable is in PATH'))
        return logstream(stdout)
+
# pre-revprop-change hook installed into repositories created by
# svn_sink: it allows exactly the revision property edits the
# conversion needs (svn:log, hg:convert-branch, hg:convert-rev) and
# rejects everything else.
pre_revprop_change = '''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi

echo "Changing prohibited revision property" >&2
exit 1
'''
+
class svn_sink(converter_sink, commandline):
    """Sink writing converted changesets into a Subversion repository
    through a working copy."""

    # Matches the revision number reported by 'svn commit'.
    commit_re = re.compile(r'Committed revision (\d+).', re.M)

    def prerun(self):
        # svn commands must be run from inside the working copy.
        if self.wc:
            os.chdir(self.wc)

    def postrun(self):
        if self.wc:
            os.chdir(self.cwd)

    def join(self, name):
        """Return the path of 'name' inside the working copy .svn area."""
        return os.path.join(self.wc, '.svn', name)

    def revmapfile(self):
        return self.join('hg-shamap')

    def authorfile(self):
        return self.join('hg-authormap')

    def __init__(self, ui, path):
        """Open 'path' as an existing svn working copy, or create a
        repository there and check out a fresh '<path>-wc' working copy
        next to the current directory."""
        converter_sink.__init__(self, ui, path)
        commandline.__init__(self, ui, 'svn')
        self.delete = []
        self.setexec = []
        self.delexec = []
        self.copies = []
        self.wc = None
        self.cwd = os.getcwd()

        path = os.path.realpath(path)

        created = False
        if os.path.isfile(os.path.join(path, '.svn', 'entries')):
            # Existing working copy: just bring it up to date.
            self.wc = path
            self.run0('update')
        else:
            wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')

            if os.path.isdir(os.path.dirname(path)):
                if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
                    ui.status(_('initializing svn repository %r\n') %
                              os.path.basename(path))
                    commandline(ui, 'svnadmin').run0('create', path)
                    created = path
                path = util.normpath(path)
                if not path.startswith('/'):
                    path = '/' + path
                path = 'file://' + path

            ui.status(_('initializing svn working copy %r\n')
                      % os.path.basename(wcpath))
            self.run0('checkout', path, wcpath)

            self.wc = wcpath
        self.opener = util.opener(self.wc)
        self.wopener = util.opener(self.wc)
        self.childmap = mapfile(ui, self.join('hg-childmap'))
        # is_exec is None on filesystems without execute-bit support.
        self.is_exec = util.checkexec(self.wc) and util.is_exec or None

        if created:
            # Install the hook allowing the revprop edits the conversion
            # performs (see pre_revprop_change).
            hook = os.path.join(created, 'hooks', 'pre-revprop-change')
            fp = open(hook, 'w')
            fp.write(pre_revprop_change)
            fp.close()
            util.set_flags(hook, False, True)

        xport = transport.SvnRaTransport(url=geturl(path))
        self.uuid = svn.ra.get_uuid(xport.ra)

    def wjoin(self, *names):
        return os.path.join(self.wc, *names)

    def putfile(self, filename, flags, data):
        """Write 'data' to 'filename' in the working copy, honouring the
        symlink ('l') and executable ('x') flags, and schedule the
        svn:executable property updates for commit time."""
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            try:
                if os.path.islink(self.wjoin(filename)):
                    os.unlink(filename)
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)

            if self.is_exec:
                was_exec = self.is_exec(self.wjoin(filename))
            else:
                # On filesystems not supporting execute-bit, there is no way
                # to know if it is set but asking subversion. Setting it
                # systematically is just as expensive and much simpler.
                was_exec = 'x' not in flags

            util.set_flags(self.wjoin(filename), False, 'x' in flags)
            if was_exec:
                if 'x' not in flags:
                    self.delexec.append(filename)
            else:
                if 'x' in flags:
                    self.setexec.append(filename)

    def _copyfile(self, source, dest):
        # SVN's copy command pukes if the destination file exists, but
        # our copyfile method expects to record a copy that has
        # already occurred.  Cross the semantic gap.
        wdest = self.wjoin(dest)
        exists = os.path.exists(wdest)
        if exists:
            # Move the existing destination out of the way...
            fd, tempname = tempfile.mkstemp(
                prefix='hg-copy-', dir=os.path.dirname(wdest))
            os.close(fd)
            os.unlink(tempname)
            os.rename(wdest, tempname)
        try:
            self.run0('copy', source, dest)
        finally:
            if exists:
                # ...and restore it over whatever 'svn copy' created.
                try:
                    os.unlink(wdest)
                except OSError:
                    pass
                os.rename(tempname, wdest)

    def dirs_of(self, files):
        """Return the set of directories named by or containing 'files'."""
        dirs = set()
        for f in files:
            if os.path.isdir(self.wjoin(f)):
                dirs.add(f)
            for i in strutil.rfindall(f, '/'):
                dirs.add(f[:i])
        return dirs

    def add_dirs(self, files):
        """svn-add the not-yet-versioned directories of 'files'."""
        add_dirs = [d for d in sorted(self.dirs_of(files))
                    if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
        if add_dirs:
            self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
        return add_dirs

    def add_files(self, files):
        """svn-add 'files' and return them."""
        if files:
            self.xargs(files, 'add', quiet=True)
        return files

    def tidy_dirs(self, names):
        """svn-delete directories left empty (only the .svn admin area
        remains) by the current changes; return the deleted ones."""
        deleted = []
        # Deepest-first so parents emptied by a child deletion are seen.
        for d in sorted(self.dirs_of(names), reverse=True):
            wd = self.wjoin(d)
            # BUGFIX: os.listdir() returns a list; the previous
            # comparison against the bare string '.svn' was always
            # false, so empty directories were never pruned.
            if os.listdir(wd) == ['.svn']:
                self.run0('delete', d)
                deleted.append(d)
        return deleted

    def addchild(self, parent, child):
        self.childmap[parent] = child

    def revid(self, rev):
        return u"svn:%s@%s" % (self.uuid, rev)

    def putcommit(self, files, copies, parents, commit, source, revmap):
        """Apply 'files' (with 'copies') on top of 'parents' and commit
        them with the metadata of 'commit'; return the new revid."""
        # Apply changes to working copy
        for f, v in files:
            try:
                data = source.getfile(f, v)
            except IOError:
                self.delete.append(f)
            else:
                e = source.getmode(f, v)
                self.putfile(f, e, data)
                if f in copies:
                    self.copies.append([copies[f], f])
        files = [f[0] for f in files]

        for parent in parents:
            try:
                # Already converted as the child of an earlier parent.
                return self.revid(self.childmap[parent])
            except KeyError:
                pass
        entries = set(self.delete)
        files = frozenset(files)
        entries.update(self.add_dirs(files.difference(entries)))
        if self.copies:
            for s, d in self.copies:
                self._copyfile(s, d)
            self.copies = []
        if self.delete:
            self.xargs(self.delete, 'delete')
            self.delete = []
        entries.update(self.add_files(files.difference(entries)))
        entries.update(self.tidy_dirs(entries))
        if self.delexec:
            self.xargs(self.delexec, 'propdel', 'svn:executable')
            self.delexec = []
        if self.setexec:
            self.xargs(self.setexec, 'propset', 'svn:executable', '*')
            self.setexec = []

        # The commit message is passed through a temporary file.
        fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
        fp = os.fdopen(fd, 'w')
        fp.write(commit.desc)
        fp.close()
        try:
            output = self.run0('commit',
                               username=util.shortuser(commit.author),
                               file=messagefile,
                               encoding='utf-8')
            try:
                rev = self.commit_re.search(output).group(1)
            except AttributeError:
                if not files:
                    return parents[0]
                self.ui.warn(_('unexpected svn output:\n'))
                self.ui.warn(output)
                raise util.Abort(_('unable to cope with svn output'))
            if commit.rev:
                self.run('propset', 'hg:convert-rev', commit.rev,
                         revprop=True, revision=rev)
            if commit.branch and commit.branch != 'default':
                self.run('propset', 'hg:convert-branch', commit.branch,
                         revprop=True, revision=rev)
            for parent in parents:
                self.addchild(parent, rev)
            return self.revid(rev)
        finally:
            os.unlink(messagefile)

    def puttags(self, tags):
        self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
diff --git a/plugins/hg4idea/testData/bin/hgext/convert/transport.py b/plugins/hg4idea/testData/bin/hgext/convert/transport.py
new file mode 100644 (file)
index 0000000..77cba41
--- /dev/null
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
+# This is a stripped-down version of the original bzr-svn transport.py,
+# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+from svn.core import SubversionException, Pool
+import svn.ra
+import svn.client
+import svn.core
+
+# Some older versions of the Python bindings need to be
+# explicitly initialized. But what we want to do probably
+# won't work worth a darn against those libraries anyway!
+svn.ra.initialize()
+
+# Module-wide Subversion client configuration, loaded once at import
+# time (None selects the user's default config area).
+svn_config = svn.core.svn_config_get_config(None)
+
+
+def _create_auth_baton(pool):
+    """Create a Subversion authentication baton.
+
+    Installs the stock file-based credential providers, plus any
+    platform-specific ones the installed bindings expose, and returns
+    the opened auth baton allocated from *pool*.
+    """
+    import svn.client
+    # Give the client context baton a suite of authentication
+    # providers.
+    providers = [
+        svn.client.get_simple_provider(pool),
+        svn.client.get_username_provider(pool),
+        svn.client.get_ssl_client_cert_file_provider(pool),
+        svn.client.get_ssl_client_cert_pw_file_provider(pool),
+        svn.client.get_ssl_server_trust_file_provider(pool),
+        ]
+    # Platform-dependent authentication methods
+    getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
+                          None)
+    if getprovider:
+        # Available in svn >= 1.6
+        for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
+            for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
+                p = getprovider(name, type, pool)
+                if p:
+                    providers.append(p)
+    else:
+        # Older bindings: only the Windows-specific helper may exist.
+        if hasattr(svn.client, 'get_windows_simple_provider'):
+            providers.append(svn.client.get_windows_simple_provider(pool))
+
+    return svn.core.svn_auth_open(providers, pool)
+
+class NotBranchError(SubversionException):
+    """Raised when a URL does not point at a usable Subversion repository."""
+    pass
+
+class SvnRaTransport(object):
+    """
+    Open an ra connection to a Subversion repository.
+
+    If an existing ra session is supplied and the bindings support
+    reparent(), the session is reused and repointed at *url*; otherwise
+    a fresh client context and ra session are created.
+    """
+    def __init__(self, url="", ra=None):
+        self.pool = Pool()
+        self.svn_url = url
+        # Credentials stay empty; authentication is delegated to the
+        # providers installed by _create_auth_baton below.
+        self.username = ''
+        self.password = ''
+
+        # Only Subversion 1.4 has reparent()
+        if ra is None or not hasattr(svn.ra, 'reparent'):
+            self.client = svn.client.create_context(self.pool)
+            ab = _create_auth_baton(self.pool)
+            # NOTE(review): deliberately dead branch -- default
+            # username/password injection is disabled; kept for
+            # reference from the original bzr-svn transport.
+            if False:
+                svn.core.svn_auth_set_parameter(
+                    ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
+                svn.core.svn_auth_set_parameter(
+                    ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
+            self.client.auth_baton = ab
+            self.client.config = svn_config
+            try:
+                self.ra = svn.client.open_ra_session(
+                    self.svn_url.encode('utf8'),
+                    self.client, self.pool)
+            except SubversionException, (inst, num):
+                # Map URL-shaped failures to NotBranchError so callers can
+                # tell "not a repository" apart from other svn errors.
+                if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
+                           svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
+                           svn.core.SVN_ERR_BAD_URL):
+                    raise NotBranchError(url)
+                raise
+        else:
+            # Reuse the caller's session, repointing it at our URL.
+            self.ra = ra
+            svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
+
+    class Reporter(object):
+        # Thin wrapper around the (reporter, baton) pair returned by
+        # svn.ra.do_update, exposing the reporter2 vtable as methods.
+        def __init__(self, (reporter, report_baton)):
+            self._reporter = reporter
+            self._baton = report_baton
+
+        def set_path(self, path, revnum, start_empty, lock_token, pool=None):
+            svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
+                        path, revnum, start_empty, lock_token, pool)
+
+        def delete_path(self, path, pool=None):
+            svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
+                    path, pool)
+
+        def link_path(self, path, url, revision, start_empty, lock_token,
+                      pool=None):
+            svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
+                    path, url, revision, start_empty, lock_token,
+                    pool)
+
+        def finish_report(self, pool=None):
+            svn.ra.reporter2_invoke_finish_report(self._reporter,
+                    self._baton, pool)
+
+        def abort_report(self, pool=None):
+            svn.ra.reporter2_invoke_abort_report(self._reporter,
+                    self._baton, pool)
+
+    def do_update(self, revnum, path, *args, **kwargs):
+        # Start an update report against this session and hand back a
+        # Reporter wrapping the underlying (reporter, baton) pair.
+        return self.Reporter(svn.ra.do_update(self.ra, revnum, path,
+                                              *args, **kwargs))
diff --git a/plugins/hg4idea/testData/bin/hgext/extdiff.py b/plugins/hg4idea/testData/bin/hgext/extdiff.py
new file mode 100644 (file)
index 0000000..6fca94a
--- /dev/null
@@ -0,0 +1,283 @@
+# extdiff.py - external diff program support for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''command to allow external programs to compare revisions
+
+The extdiff Mercurial extension allows you to use external programs
+to compare revisions, or revision with working directory. The external
+diff programs are called with a configurable set of options and two
+non-option arguments: paths to directories containing snapshots of
+files to compare.
+
+The extdiff extension also allows to configure new diff commands, so
+you do not need to type "hg extdiff -p kdiff3" always. ::
+
+  [extdiff]
+  # add new command that runs GNU diff(1) in 'context diff' mode
+  cdiff = gdiff -Nprc5
+  ## or the old way:
+  #cmd.cdiff = gdiff
+  #opts.cdiff = -Nprc5
+
+  # add new command called vdiff, runs kdiff3
+  vdiff = kdiff3
+
+  # add new command called meld, runs meld (no need to name twice)
+  meld =
+
+  # add new command called vimdiff, runs gvimdiff with DirDiff plugin
+  # (see http://www.vim.org/scripts/script.php?script_id=102) Non
+  # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
+  # your .vimrc
+  vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
+
+You can use -I/-X and list of file or directory names like normal "hg
+diff" command. The extdiff extension makes snapshots of only needed
+files, so running the external diff program will actually be pretty
+fast (at least faster than having to compare the entire tree).
+'''
+
+from mercurial.i18n import _
+from mercurial.node import short, nullid
+from mercurial import cmdutil, util, commands, encoding
+import os, shlex, shutil, tempfile, re
+
+def snapshot(ui, repo, files, node, tmproot):
+    '''snapshot files as of some revision
+    if not using snapshot, -I/-X does not work and recursive diff
+    in tools like kdiff3 and meld displays too many files.'''
+    dirname = os.path.basename(repo.root)
+    if dirname == "":
+        dirname = "root"
+    if node is not None:
+        dirname = '%s.%s' % (dirname, short(node))
+    base = os.path.join(tmproot, dirname)
+    os.mkdir(base)
+    if node is not None:
+        ui.note(_('making snapshot of %d files from rev %s\n') %
+                (len(files), short(node)))
+    else:
+        ui.note(_('making snapshot of %d files from working directory\n') %
+            (len(files)))
+    wopener = util.opener(base)
+    fns_and_mtime = []
+    ctx = repo[node]
+    for fn in files:
+        wfn = util.pconvert(fn)
+        if not wfn in ctx:
+            # File doesn't exist; could be a bogus modify
+            continue
+        ui.note('  %s\n' % wfn)
+        dest = os.path.join(base, wfn)
+        fctx = ctx[wfn]
+        data = repo.wwritedata(wfn, fctx.data())
+        if 'l' in fctx.flags():
+            wopener.symlink(data, wfn)
+        else:
+            wopener(wfn, 'w').write(data)
+            if 'x' in fctx.flags():
+                util.set_flags(dest, False, True)
+        if node is None:
+            fns_and_mtime.append((dest, repo.wjoin(fn), os.path.getmtime(dest)))
+    return dirname, fns_and_mtime
+
+def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
+    '''Do the actuall diff:
+
+    - copy to a temp structure if diffing 2 internal revisions
+    - copy to a temp structure if diffing working revision with
+      another one and more than 1 file is changed
+    - just invoke the diff for a single file in the working dir
+    '''
+
+    revs = opts.get('rev')
+    change = opts.get('change')
+    args = ' '.join(diffopts)
+    do3way = '$parent2' in args
+
+    if revs and change:
+        msg = _('cannot specify --rev and --change at the same time')
+        raise util.Abort(msg)
+    elif change:
+        node2 = repo.lookup(change)
+        node1a, node1b = repo.changelog.parents(node2)
+    else:
+        node1a, node2 = cmdutil.revpair(repo, revs)
+        if not revs:
+            node1b = repo.dirstate.parents()[1]
+        else:
+            node1b