
File: cache.cpp

package info
qgit 2.12-1~exp1
  • area: main
  • in suites: experimental
  • size: 2,364 kB
  • sloc: cpp: 13,251; xml: 47; sh: 25; javascript: 16; makefile: 3
file content (172 lines)
/*
	Description: file names persistent cache

	Author: Marco Costalba (C) 2005-2007

	Copyright: See COPYING file that comes with this distribution

*/
#include <QFile>
#include <QDataStream>
#include <QDir>
#include "cache.h"

using namespace QGit;

bool Cache::save(const QString& gitDir, const RevFileMap& rf,
                 const StrVect& dirs, const StrVect& files) {

	if (gitDir.isEmpty() || rf.isEmpty())
		return false;

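	// write to a temporary file first; it is renamed over the old cache at the end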
	QString path(gitDir + C_DAT_FILE);
	QString tmpPath(path + BAK_EXT);

	QDir dir;
	if (!dir.exists(gitDir)) {
		dbs("Git directory not found, unable to save cache");
		return false;
	}
	QFile f(tmpPath);
	if (!f.open(QIODevice::WriteOnly | QIODevice::Unbuffered))
		return false;

	dbs("Saving cache. Please wait...");

	// compress in memory before writing to file
	QByteArray data;
	QDataStream stream(&data, QIODevice::WriteOnly);

	// Write a header with a "magic number" and a version
	stream << (quint32)C_MAGIC;
	stream << (qint32)C_VERSION;

	stream << (qint32)dirs.count();
	for (int i = 0; i < dirs.count(); ++i)
		stream << dirs.at(i);

	stream << (qint32)files.count();
	for (int i = 0; i < files.count(); ++i)
		stream << files.at(i);

	// To achieve better compression we save the SHAs as one very
	// long string instead of feeding the stream with each one.
	// With this trick we gain a 15% size reduction in the final
	// compressed file; the save/load speed is almost the same.
	uint bufSize = rf.count() * 41 + 1000; // a little more space than required

	QByteArray buf;
	buf.reserve(bufSize);

	QVector<const RevFile*> v;
	v.reserve(rf.count());

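	// raw form of CUSTOM_SHA, backed by 'ba', used for the key comparison below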
	QVector<QByteArray> ba;
	ShaString CUSTOM_SHA_RAW(toPersistentSha(CUSTOM_SHA, ba));
	unsigned int newSize = 0;

	FOREACH (RevFileMap, it, rf) {

		const ShaString& sha = it.key();
		if (   sha == ZERO_SHA_RAW
		    || sha == CUSTOM_SHA_RAW
		    || sha.latin1()[0] == 'A') // ALL_MERGE_FILES + rev sha
			continue;

		v.append(it.value());
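		// 40 hex characters plus a '\0' separator per SHA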
		buf.append(sha.latin1()).append('\0');
		newSize += 41;
		if (newSize > bufSize) {
			dbs("ASSERT in Cache::save, out of allocated space");
			return false;
		}
	}
	buf.resize(newSize);
	stream << (qint32)newSize;
	stream << buf;

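	// stream the RevFile records in the same order as their SHAs appear in 'buf'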
	for (int i = 0; i < v.size(); ++i)
		*(v.at(i)) >> stream;

	dbs("Compressing data...");
	f.write(qCompress(data, 1)); // compressed data needs no further encoding when written out
	f.close();

	// rename C_DAT_FILE + BAK_EXT -> C_DAT_FILE
	if (dir.exists(path)) {
		if (!dir.remove(path)) {
			dbs("access denied to " + path);
			dir.remove(tmpPath);
			return false;
		}
	}
	dir.rename(tmpPath, path);
	dbs("Done.");
	return true;
}

bool Cache::load(const QString& gitDir, RevFileMap& rfm,
                 StrVect& dirs, StrVect& files, QByteArray& revsFilesShaBuf) {

	// check for cache file
	QString path(gitDir + C_DAT_FILE);
	QFile f(path);
	if (!f.exists())
		return true; // a missing cache file is not an error

	if (!f.open(QIODevice::ReadOnly | QIODevice::Unbuffered))
		return false;

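	// the whole file is read and decompressed in memory, mirroring save()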
	QDataStream stream(qUncompress(f.readAll()));
	quint32 magic;
	qint32 version;
	qint32 dirsNum, filesNum, bufSize;
	stream >> magic;
	stream >> version;
	if (magic != C_MAGIC || version != C_VERSION) {
		f.close();
		return false;
	}
	// read the data
	stream >> dirsNum;
	dirs.resize(dirsNum);
	for (int i = 0; i < dirsNum; ++i)
		stream >> dirs[i];

	stream >> filesNum;
	files.resize(filesNum);
	for (int i = 0; i < filesNum; ++i)
		stream >> files[i];

	stream >> bufSize;
	revsFilesShaBuf.clear();
	revsFilesShaBuf.reserve(bufSize);
	stream >> revsFilesShaBuf;

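	// walk the buffer of '\0'-separated 40-char SHAs; the ShaString keys point
	// directly into revsFilesShaBuf, so the caller must keep that buffer alive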
	const char* data = revsFilesShaBuf.constData();
	const char* endptr = &*revsFilesShaBuf.constEnd();

	while (!stream.atEnd()) {

		RevFile* rf = new RevFile();
		*rf << stream;

		if (data > endptr - 41) {
			dbs("ASSERT in Cache::load, corrupted SHA");
			return false;
		}

		ShaString sha(data);
		rfm.insert(sha, rf);

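		// advance past the 40 hex characters, then check and skip the '\0' separator written by save()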
		data += 40;
		if (*data != '\0') {
			dbp("ASSERT in Cache::load, corrupted SHA after %1", sha);
			return false;
		}
		data++;
	}
	f.close();
	return true;
}