changeset 39:2b459f9743b4

Test groups are now supported
author Oleg Oshmyan <chortos@inbox.lv>
date Fri, 03 Dec 2010 02:46:06 +0000
parents a6d554679ce8
children af9c45708987
files 2.00/problem.py 2.00/testcases.py
diffstat 2 files changed, 107 insertions(+), 60 deletions(-)
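
Note: this changeset restores test-group support (replacing the old '# FIXME: test groups should again be supported!' stub in testcases.py). A group is a nested sequence of test case IDs inside the tests configuration variable: with usegroups set, load_problem() brackets each group's cases between a problem.TestGroup marker and the problem.test_context_end sentinel, and Problem.test() keeps a stack of such contexts so a group is awarded its points only if every case in it is correct. In legacy mode usegroups is inferred automatically when a nested sequence is found. A hypothetical configuration using groups might look like the sketch below; the config-file syntax here is assumed, only the attribute names (usegroups, tests, dummies, pointmap, padtests, paddummies) appear in the code.

    # Hypothetical problem configuration (syntax assumed; attribute names from the diff)
    usegroups = True
    dummies = [1, 2]                 # sample cases, always run for 0 points
    tests = [(1, 2, 3), (4, 5)]      # two groups of test case IDs
    pointmap = {None: 10}            # every case worth 10 points unless overridden
    padtests = 2                     # zero-pad test IDs ("01", "02", ...)
    paddummies = 1
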
--- a/2.00/problem.py	Fri Dec 03 02:45:56 2010 +0000
+++ b/2.00/problem.py	Fri Dec 03 02:46:06 2010 +0000
@@ -30,7 +30,7 @@
 				signalnames[value] = name
 	del unixnames
 
-__all__ = 'Problem',
+__all__ = 'Problem', 'TestContext', 'test_context_end', 'TestGroup'
 
 def strerror(e):
 	s = getattr(e, 'strerror')
@@ -41,6 +41,49 @@
 	def __init__(self, mydict):
 		self.__dict__ = mydict
 
+class TestContext(object):
+	pass
+
+test_context_end = object()
+
+class TestGroup(TestContext):
+	__slots__ = 'case', 'log', 'correct', 'allcorrect', 'real', 'max', 'ntotal', 'nvalued', 'ncorrect', 'ncorrectvalued'
+	
+	def __init__(self):
+		self.real = self.max = self.ntotal = self.nvalued = self.ncorrect = self.ncorrectvalued = 0
+		self.allcorrect = True
+		self.log = []
+	
+	def case_start(self, case):
+		self.case = case
+		self.correct = False
+		self.ntotal += 1
+		self.max += case.points
+		if case.points:
+			self.nvalued += 1
+	
+	def case_correct(self):
+		self.correct = True
+		self.ncorrect += 1
+		if self.case.points:
+			self.ncorrectvalued += 1
+	
+	def case_end(self, granted):
+		self.log.append((self.case, self.correct, granted))
+		self.real += granted
+		del self.case
+		if not self.correct:
+			self.allcorrect = False
+	
+	def end(self):
+		say('Group total: %d/%d tests; %d/%d points' % (self.ncorrect, self.ntotal, self.real if self.allcorrect else 0, self.max))
+		# No real need to flush stdout, as it will anyway be flushed in a moment,
+		# when either the problem total or the next test case's ID is printed
+		if self.allcorrect:
+			return self.log
+		else:
+			return ((case, correct, 0) for case, correct, granted in self.log)
+
 class Problem(object):
 	__slots__ = 'name', 'config', 'cache', 'testcases'
 	
@@ -51,7 +94,7 @@
 		prob.name = name
 		prob.config = config.load_problem(name)
 		if not getattr(prob.config, 'kind', None): prob.config.kind = 'batch'
-		prob.cache = Cache({'padoutput': 0, 'usegroups': False})
+		prob.cache = Cache({'padoutput': 0})
 		prob.testcases = testcases.load_problem(prob)
 	
 	# TODO
@@ -61,11 +104,19 @@
 	def test(prob):
 		case = None
 		try:
-			real = max = ntotal = nvalued = ncorrect = ncorrectvalued = 0
+			contexts = [TestGroup()]
 			for case in prob.testcases:
-				ntotal += 1
-				max += case.points
-				if case.points: nvalued += 1
+				if case is test_context_end:
+					for case, correct, granted in contexts.pop().end():
+						contexts[-1].case_start(case)
+						if correct:
+							contexts[-1].case_correct()
+						contexts[-1].case_end(granted)
+					continue
+				elif isinstance(case, TestContext):
+					contexts.append(case)
+					continue
+				contexts[-1].case_start(case)
 				granted = 0
 				id = str(case.id)
 				if case.isdummy:
@@ -110,15 +161,15 @@
 				#except Exception:
 				#	verdict = 'unknown error [this may be a bug in test.py]%s' % strerror(sys.exc_info()[1])
 				else:
-					if hasattr(granted, '__iter__'):
+					try:
 						granted, comment = granted
+					except TypeError:
+						comment = ''
+					else:
 						if comment:
 							comment = ' (%s)' % comment
-					else:
-						comment = ''
 					if granted >= 1:
-						ncorrect += 1
-						if case.points: ncorrectvalued += 1
+						contexts[-1].case_correct()
 						verdict = 'OK' + comment
 					elif not granted:
 						verdict = 'wrong answer' + comment
@@ -126,12 +177,13 @@
 						verdict = 'partly correct' + comment
 					granted *= case.points
 				say('%g/%g, %s' % (granted, case.points, verdict))
-				real += granted
-			weighted = real * prob.config.taskweight / max if max else 0
-			if nvalued != ntotal:
-				say('Problem total: %d/%d tests (%d/%d valued); %g/%g points; weighted score: %g/%g' % (ncorrect, ntotal, ncorrectvalued, nvalued, real, max, weighted, prob.config.taskweight))
+				contexts[-1].case_end(granted)
+			weighted = contexts[0].real * prob.config.taskweight / contexts[0].max if contexts[0].max else 0
+			if contexts[0].nvalued != contexts[0].ntotal:
+				say('Problem total: %d/%d tests (%d/%d valued); %g/%g points; weighted score: %g/%g' % (contexts[0].ncorrect, contexts[0].ntotal, contexts[0].ncorrectvalued, contexts[0].nvalued, contexts[0].real, contexts[0].max, weighted, prob.config.taskweight))
 			else:
-				say('Problem total: %d/%d tests; %g/%g points; weighted score: %g/%g' % (ncorrect, ntotal, real, max, weighted, prob.config.taskweight))
+				say('Problem total: %d/%d tests; %g/%g points; weighted score: %g/%g' % (contexts[0].ncorrect, contexts[0].ntotal, contexts[0].real, contexts[0].max, weighted, prob.config.taskweight))
+			sys.stdout.flush()
 			return weighted, prob.config.taskweight
 		finally:
 			if options.erase and (not prob.config.stdio or case and case.validator):
--- a/2.00/testcases.py	Fri Dec 03 02:45:56 2010 +0000
+++ b/2.00/testcases.py	Fri Dec 03 02:46:06 2010 +0000
@@ -488,63 +488,58 @@
                                'outonly' : OutputOnlyTestCase,
                                'bestout' : BestOutputTestCase,
                                'reactive': ReactiveTestCase}):
+	# We will need to iterate over these configuration variables twice
+	try:
+		len(prob.config.dummies)
+	except Exception:
+		prob.config.dummies = tuple(prob.config.dummies)
+	try:
+		len(prob.config.tests)
+	except Exception:
+		prob.config.tests = tuple(prob.config.tests)
+	
 	if options.legacy:
 		prob.config.usegroups = False
 		prob.config.tests = list(prob.config.tests)
 		for i, name in enumerate(prob.config.tests):
+			# Same here; we'll need to iterate over them twice
 			try:
-				if len(name) > 1:
-					prob.config.usegroups = True
-					break
-				elif len(name):
-					prob.config.tests[i] = name[0]
+				l = len(name)
 			except Exception:
 				try:
-					# Try to retrieve the first two test case ID's and cache them on success
-					prob.config.tests[i] = name = iter(name)
+					name = tuple(name)
 				except TypeError:
-					continue
-				try:
-					try:
-						first = next(name)
-					except NameError:
-						# Python 2.5 lacks the next() built-in
-						first = name.next()
-				except StopIteration:
-					prob.config.tests[i] = ()
-				else:
-					try:
-						try:
-							second = next(name)
-						except NameError:
-							second = name.next()
-					except StopIteration:
-						prob.config.tests[i] = first
-					else:
-						prob.config.tests[i] = itertools.chain((first, second), name)
-						prob.config.usegroups = True
-						break
+					name = (name,)
+				l = len(name)
+			if len(name) > 1:
+				prob.config.usegroups = True
+				break
+			elif not len(name):
+				prob.config.tests[i] = (name,)
+	
+	# First get prob.cache.padoutput right,
+	# then yield the actual test cases
+	for i in prob.config.dummies:
+		s = 'sample ' + str(i).zfill(prob.config.paddummies)
+		prob.cache.padoutput = max(prob.cache.padoutput, len(s))
 	if prob.config.usegroups:
-		# FIXME: test groups should again be supported!
-		pass
+		for group in prob.config.tests:
+			for i in group:
+				s = str(i).zfill(prob.config.padtests)
+				prob.cache.padoutput = max(prob.cache.padoutput, len(s))
+		for i in prob.config.dummies:
+			s = str(i).zfill(prob.config.paddummies)
+			yield _types[prob.config.kind](prob, s, True, 0)
+		for group in prob.config.tests:
+			yield problem.TestGroup()
+			for i in group:
+				s = str(i).zfill(prob.config.padtests)
+				yield _types[prob.config.kind](prob, s, False, prob.config.pointmap.get(i, prob.config.pointmap.get(None, prob.config.maxexitcode if prob.config.maxexitcode else 1)))
+			yield problem.test_context_end
 	else:
-		# We will need to iterate over these configuration variables twice
-		try:
-			len(prob.config.dummies)
-		except Exception:
-			prob.config.dummies = tuple(prob.config.dummies)
-		try:
-			len(prob.config.tests)
-		except Exception:
-			prob.config.tests = tuple(prob.config.tests)
-		# First get prob.cache.padoutput right
-		for i in prob.config.dummies:
-			s = 'sample ' + str(i).zfill(prob.config.paddummies)
-			prob.cache.padoutput = max(prob.cache.padoutput, len(s))
 		for i in prob.config.tests:
 			s = str(i).zfill(prob.config.padtests)
 			prob.cache.padoutput = max(prob.cache.padoutput, len(s))
-		# Now yield the actual test cases
 		for i in prob.config.dummies:
 			s = str(i).zfill(prob.config.paddummies)
 			yield _types[prob.config.kind](prob, s, True, 0)
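
On the testcases.py side, load_problem() is a generator, and with usegroups set it now interleaves sentinels with the cases: a fresh problem.TestGroup() opens every group and the shared problem.test_context_end object closes it (dummy/sample cases are yielded before any group). Problem.test() treats these sentinels as push/pop operations on its context stack. A rough sketch of the resulting stream and of a consumer walking it, using stand-in objects rather than the real classes:

    class TestContext(object): pass      # stand-in for problem.TestContext
    class TestGroup(TestContext): pass   # stand-in for problem.TestGroup
    test_context_end = object()          # stand-in for problem.test_context_end

    def walk(stream):
        depth = 0
        for item in stream:
            if item is test_context_end:
                depth -= 1                      # pop a context
            elif isinstance(item, TestContext):
                depth += 1                      # push a context
            else:
                print('%scase %s' % ('  ' * depth, item))

    # For tests = [(1, 2, 3), (4, 5)] the generator would effectively yield:
    walk([TestGroup(), 1, 2, 3, test_context_end,
          TestGroup(), 4, 5, test_context_end])
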