+#!/usr/bin/python
#
# ljdump.py - livejournal archiver
# Greg Hewgill <greg@hewgill.com> http://hewgill.com
-# Version 1.0
+# Version 1.2
#
# $Id$
#
# clear; the livejournal "challenge" password mechanism is used.
#
# This program may be run as often as needed to bring the backup copy up
-# to date. Only new items are downloaded.
+# to date. Both new and updated items are downloaded.
+#
+# The community http://ljdump.livejournal.com has been set up for questions
+# or comments.
#
# LICENSE
#
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
-# Copyright (c) 2005 Greg Hewgill
+# Copyright (c) 2005-2006 Greg Hewgill
-import codecs, md5, os, pprint, sys, xml.dom.minidom, xmlrpclib
+import codecs, md5, os, pickle, pprint, re, shutil, sys, urllib2, xml.dom.minidom, xmlrpclib
from xml.sax import saxutils
+# Map userpic Content-Type response headers to file extensions; types not
+# listed here are saved with no extension (see the userpic download loop).
+MimeExtensions = {
+    "image/gif": ".gif",
+    "image/jpeg": ".jpg",
+    "image/png": ".png",
+}
+
def calcchallenge(challenge, password):
    """Return the hex challenge-response for LiveJournal's challenge auth.

    The protocol defines the response as md5(challenge + md5(password)),
    with both digests in lowercase hex.

    hashlib replaces the md5 module, which is deprecated since Python 2.5.
    Text inputs are encoded to UTF-8 before hashing; on Python 2 byte
    strings (str) pass through untouched, exactly as before.  This also
    fixes a latent crash: the config parser yields unicode, and the old
    implicit ASCII encoding failed for non-ASCII passwords.
    """
    import hashlib
    if not isinstance(password, bytes):
        password = password.encode("utf-8")
    inner = hashlib.md5(password).hexdigest()
    data = challenge + inner
    if not isinstance(data, bytes):
        data = data.encode("utf-8")
    return hashlib.md5(data).hexdigest()
+
def flatresponse(response):
    """Parse a LiveJournal "flat" interface response into a dict.

    The flat format is alternating lines: a variable-name line followed by
    its value line.  *response* is a file-like object; reading stops at EOF
    (an empty name line).
    """
    r = {}
    while True:
        name = response.readline()
        if len(name) == 0:
            break
        if name[-1] == '\n':
            name = name[:-1]
        value = response.readline()
        # Guard the truncated-response case where the final value line is
        # missing entirely: value is "" and the original value[-1] raised
        # IndexError.  Treat the missing value as empty instead.
        if len(value) > 0 and value[-1] == '\n':
            value = value[:-1]
        r[name] = value
    return r
+
def getljsession(username, password):
    """Obtain an ljsession cookie value via the flat interface.

    Performs the two-step challenge login: fetch a challenge, then call
    sessiongenerate with the computed challenge response.
    """
    conn = urllib2.urlopen(Server+"/interface/flat", "mode=getchallenge")
    chal = flatresponse(conn)
    conn.close()
    answer = calcchallenge(chal['challenge'], password)
    conn = urllib2.urlopen(
        Server+"/interface/flat",
        "mode=sessiongenerate&user=%s&auth_method=challenge&auth_challenge=%s&auth_response=%s" % (username, chal['challenge'], answer))
    sess = flatresponse(conn)
    conn.close()
    return sess['ljsession']
+
def dochallenge(params, password):
    """Add challenge-response auth fields to *params* (mutated in place).

    Fetches a fresh challenge from the XML-RPC server each call, since a
    challenge is single-use.  Returns the same dict for call chaining.
    """
    challenge = server.LJ.XMLRPC.getchallenge()['challenge']
    params['auth_method'] = "challenge"
    params['auth_challenge'] = challenge
    params['auth_response'] = calcchallenge(challenge, password)
    return params
dumpelement(f, "event", event)
f.close()
def writelast():
    """Checkpoint resume state to "<user>/.last".

    Writes two lines: the last syncitems timestamp and the highest comment
    id downloaded so far (module globals lastsync / lastmaxid).
    """
    state = open("%s/.last" % Username, "w")
    state.write("%s\n%s\n" % (lastsync, lastmaxid))
    state.close()
+
def createxml(doc, name, map):
    """Return a new <name> element with one text-bearing child per map entry.

    Each key in *map* becomes a child element whose text node is the
    corresponding value.  *doc* is only used as the element factory.
    """
    container = doc.createElement(name)
    for key, text in map.items():
        child = doc.createElement(key)
        child.appendChild(doc.createTextNode(text))
        container.appendChild(child)
    return container
+
def gettext(e):
    """Return the text of the first element in NodeList *e*, or "".

    Returns "" both when the list is empty and when the first element has
    no text child.  The latter fixes an AttributeError on self-closing
    elements such as <subject/>, which the comment export emits for
    comments without a subject (see the comment_body parsing below).
    """
    if len(e) == 0:
        return ""
    node = e[0].firstChild
    if node is None:
        return ""
    return node.nodeValue
+
# Connection settings are read from the ljdump.config XML file in the
# current directory.
config = xml.dom.minidom.parse("ljdump.config")
Server = config.documentElement.getElementsByTagName("server")[0].childNodes[0].data
Username = config.documentElement.getElementsByTagName("username")[0].childNodes[0].data
Password = config.documentElement.getElementsByTagName("password")[0].childNodes[0].data
+# Accept a <server> value that already ends in /interface/xmlrpc by
+# stripping it back to the bare site URL; the interface paths are appended
+# explicitly wherever they are needed below.
+m = re.search("(.*)/interface/xmlrpc", Server)
+if m:
+    Server = m.group(1)
+
print "Fetching journal entries for: %s" % Username
# All downloaded data is stored in a directory named after the user.
# NOTE(review): the bare except also hides errors other than "directory
# already exists" (e.g. permissions) -- confirm that is acceptable.
try:
    os.mkdir(Username)
except:
    pass
-server = xmlrpclib.ServerProxy(Server)
+# Authenticate once over the flat interface; the resulting session cookie
+# is required by the export_comments.bml requests below.
+ljsession = getljsession(Username, Password)
+
+server = xmlrpclib.ServerProxy(Server+"/interface/xmlrpc")
-total = 0
-fetched = 0
+newentries = 0
+newcomments = 0
errors = 0
+# Resume state: "<user>/.last" holds the last syncitems timestamp on the
+# first line and the highest downloaded comment id on the second.  The
+# second line may be absent in state files written by older versions, in
+# which case lastmaxid stays 0 and all comments are refetched.
-last = ""
+lastsync = ""
+lastmaxid = 0
+try:
+    f = open("%s/.last" % Username, "r")
+    lastsync = f.readline()
+    if lastsync[-1] == '\n':
+        lastsync = lastsync[:len(lastsync)-1]
+    lastmaxid = f.readline()
+    if len(lastmaxid) > 0 and lastmaxid[-1] == '\n':
+        lastmaxid = lastmaxid[:len(lastmaxid)-1]
+    if lastmaxid == "":
+        lastmaxid = 0
+    else:
+        lastmaxid = int(lastmaxid)
+    f.close()
+except:
+    pass
+origlastsync = lastsync
+
+# XML-RPC login, used here only to obtain the userpic keyword/URL lists.
+r = server.LJ.XMLRPC.login(dochallenge({
+    'username': Username,
+    'ver': 1,
+    'getpickws': 1,
+    'getpickwurls': 1,
+}, Password))
+userpics = dict(zip(map(str, r['pickws']), r['pickwurls']))
+# '*' stands in as the keyword for the default userpic.
+userpics['*'] = r['defaultpicurl']
# Main entry download loop: syncitems returns journal items changed since
# lastsync; each 'L' item (a journal entry) is then fetched individually
# with getevents selecttype=one and dumped to "<user>/L-<itemid>".
while True:
    r = server.LJ.XMLRPC.syncitems(dochallenge({
        'username': Username,
        'ver': 1,
-        'lastsync': last,
+        'lastsync': lastsync,
    }, Password))
    #pprint.pprint(r)
    if len(r['syncitems']) == 0:
        break
    for item in r['syncitems']:
        if item['item'][0] == 'L':
-            fn = "%s/%s" % (Username, item['item'])
-            if not os.access(fn, os.F_OK):
-                print "Fetching journal entry %s" % item['item']
-                try:
-                    e = server.LJ.XMLRPC.getevents(dochallenge({
-                        'username': Username,
-                        'ver': 1,
-                        'selecttype': "one",
-                        'itemid': item['item'][2:],
-                    }, Password))
-                    writedump(fn, e['events'][0])
-                    fetched += 1
-                except xmlrpclib.Fault, x:
-                    print "Error getting item: %s" % item['item']
-                    pprint.pprint(x)
+            # Unlike 1.0, items are refetched even if the file exists, so
+            # edited entries are picked up (item['action'] says which).
+            print "Fetching journal entry %s (%s)" % (item['item'], item['action'])
+            try:
+                e = server.LJ.XMLRPC.getevents(dochallenge({
+                    'username': Username,
+                    'ver': 1,
+                    'selecttype': "one",
+                    'itemid': item['item'][2:],
+                }, Password))
+                if e['events']:
+                    writedump("%s/%s" % (Username, item['item']), e['events'][0])
+                    newentries += 1
+                else:
+                    print "Unexpected empty item: %s" % item['item']
                    errors += 1
-        last = item['time']
-        total += 1
-print "%d total entries" % total
-print "%d fetched entries" % fetched
+            except xmlrpclib.Fault, x:
+                print "Error getting item: %s" % item['item']
+                pprint.pprint(x)
+                errors += 1
+        lastsync = item['time']
+    # Checkpoint after every syncitems batch so an interrupted run resumes
+    # from the last completed batch rather than from scratch.
+    writelast()
+
+# The following code doesn't work because the server rejects our repeated calls.
+# http://www.livejournal.com/doc/server/ljp.csp.xml-rpc.getevents.html
+# contains the statement "You should use the syncitems selecttype in
+# conjuntions [sic] with the syncitems protocol mode", but provides
+# no other explanation about how these two function calls should
+# interact. Therefore we just do the above slow one-at-a-time method.
+
+#while True:
+# r = server.LJ.XMLRPC.getevents(dochallenge({
+# 'username': Username,
+# 'ver': 1,
+# 'selecttype': "syncitems",
+# 'lastsync': lastsync,
+# }, Password))
+# pprint.pprint(r)
+# if len(r['events']) == 0:
+# break
+# for item in r['events']:
+# writedump("%s/L-%d" % (Username, item['itemid']), item)
+# newentries += 1
+# lastsync = item['eventtime']
+
+print "Fetching journal comments for: %s" % Username
+
+# Comment download is two-pass (metadata first, then bodies).  The pickled
+# metacache maps comment id -> {posterid, state}; usermap maps poster id
+# -> username.  Both caches survive across runs; a missing or unreadable
+# cache file just starts an empty one.
+try:
+    f = open("%s/comment.meta" % Username)
+    metacache = pickle.load(f)
+    f.close()
+except:
+    metacache = {}
+
+try:
+    f = open("%s/user.map" % Username)
+    usermap = pickle.load(f)
+    f.close()
+except:
+    usermap = {}
+
+# Pass 1: page through comment_meta starting after the last id we already
+# have, until the server-reported maxid has been reached.
+maxid = lastmaxid
+while True:
+    r = urllib2.urlopen(urllib2.Request(Server+"/export_comments.bml?get=comment_meta&startid=%d" % (maxid+1), headers = {'Cookie': "ljsession="+ljsession}))
+    meta = xml.dom.minidom.parse(r)
+    r.close()
+    for c in meta.getElementsByTagName("comment"):
+        id = int(c.getAttribute("id"))
+        metacache[id] = {
+            'posterid': c.getAttribute("posterid"),
+            'state': c.getAttribute("state"),
+        }
+        if id > maxid:
+            maxid = id
+    for u in meta.getElementsByTagName("usermap"):
+        usermap[u.getAttribute("id")] = u.getAttribute("user")
+    if maxid >= int(meta.getElementsByTagName("maxid")[0].firstChild.nodeValue):
+        break
+
+# Persist both caches before fetching bodies, so pass 1 work is kept even
+# if pass 2 fails partway.
+f = open("%s/comment.meta" % Username, "w")
+pickle.dump(metacache, f)
+f.close()
+
+f = open("%s/user.map" % Username, "w")
+pickle.dump(usermap, f)
+f.close()
+
+print "Fetching userpics for: %s" % Username
+# Write an index (userpics.xml) of keyword -> URL, and save each image to
+# a file named after its keyword, with an extension chosen from the
+# response Content-Type via MimeExtensions.
+f = open("%s/userpics.xml" % Username, "w")
+print >>f, """<?xml version="1.0"?>"""
+print >>f, "<userpics>"
+for p in userpics:
+    print >>f, """<userpic keyword="%s" url="%s" />""" % (p, userpics[p])
+    pic = urllib2.urlopen(userpics[p])
+    ext = MimeExtensions.get(pic.info()["Content-Type"], "")
+    # Keywords are UTF-8 byte strings here; decode so the filename is
+    # written with the platform's unicode handling.
+    picf = open("%s/%s%s" % (Username, codecs.utf_8_decode(p)[0], ext), "wb")
+    shutil.copyfileobj(pic, picf)
+    pic.close()
+    picf.close()
+print >>f, "</userpics>"
+f.close()
+
+# Pass 2: fetch comment bodies over the same id range pass 1 covered, and
+# append each new comment to its entry's per-item file "<user>/C-<jitemid>".
+newmaxid = maxid
+maxid = lastmaxid
+while True:
+    r = urllib2.urlopen(urllib2.Request(Server+"/export_comments.bml?get=comment_body&startid=%d" % (maxid+1), headers = {'Cookie': "ljsession="+ljsession}))
+    meta = xml.dom.minidom.parse(r)
+    r.close()
+    for c in meta.getElementsByTagName("comment"):
+        id = int(c.getAttribute("id"))
+        jitemid = c.getAttribute("jitemid")
+        comment = {
+            'id': str(id),
+            'parentid': c.getAttribute("parentid"),
+            'subject': gettext(c.getElementsByTagName("subject")),
+            'date': gettext(c.getElementsByTagName("date")),
+            'body': gettext(c.getElementsByTagName("body")),
+            'state': metacache[id]['state'],
+        }
+        if usermap.has_key(c.getAttribute("posterid")):
+            comment["user"] = usermap[c.getAttribute("posterid")]
+        # Load the existing C-<jitemid> document, or start a fresh
+        # <comments> root if it doesn't exist or can't be parsed.
+        try:
+            entry = xml.dom.minidom.parse("%s/C-%s" % (Username, jitemid))
+        except:
+            entry = xml.dom.minidom.getDOMImplementation().createDocument(None, "comments", None)
+        # Skip comments already present in the file (the export ranges can
+        # overlap the previous run's tail).
+        found = False
+        for d in entry.getElementsByTagName("comment"):
+            if int(d.getElementsByTagName("id")[0].firstChild.nodeValue) == id:
+                found = True
+                break
+        if found:
+            print "Warning: downloaded duplicate comment id %d in jitemid %s" % (id, jitemid)
+        else:
+            entry.documentElement.appendChild(createxml(entry, "comment", comment))
+            f = codecs.open("%s/C-%s" % (Username, jitemid), "w", "UTF-8")
+            entry.writexml(f)
+            f.close()
+            newcomments += 1
+        if id > maxid:
+            maxid = id
+    if maxid >= newmaxid:
+        break
+
+lastmaxid = maxid
+
+writelast()
+
+if origlastsync:
+    print "%d new entries, %d new comments (since %s)" % (newentries, newcomments, origlastsync)
+else:
+    print "%d new entries, %d new comments" % (newentries, newcomments)
if errors > 0:
    print "%d errors" % errors