provide team evaluation of survey
parent d50b1ca9df
commit cacca105ac

2 changed files with 32 additions and 2 deletions
@@ -40,6 +40,10 @@ It's possible to leave some of the questions unanswered.
   >>> resp02 = Response(quest, 'john')
   >>> resp02.values = {qu01: 2, qu03: 4}
 
+
+Evaluation
+==========
+
 Now let's calculate the result for resp01.
 
   >>> res = resp01.getResult()
@@ -55,8 +59,8 @@ Now let's calculate the result for resp01.
   fi03 4.0
   fi01 2.4
 
-Grouped Feedback Items
-======================
+Grouped feedback items
+----------------------
 
   >>> from cybertools.knowledge.survey.questionnaire import QuestionGroup
   >>> qugroup = QuestionGroup(quest)
@@ -74,3 +78,13 @@ Grouped Feedback Items
   ...     print fi.text, round(score, 2)
   fi03 0.75
 
+Team evaluation
+---------------
+
+  >>> resp03 = Response(quest, 'mary')
+  >>> resp03.values = {qu01: 1, qu02: 2, qu03: 4}
+
+  >>> res, ranks, averages = resp01.getTeamResult([resp01, resp03])
+  >>> ranks, averages
+  ([2], [0.6666...])
+
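The doctest above exercises the new team evaluation: getTeamResult() returns the caller's own grouped result together with, per question group, the caller's rank within the team and the team's average score. As a minimal sketch (using hypothetical scores, not the values from the questionnaire set up above), an entry in averages is simply the mean of the team's scores for that group:

  >>> team_scores = [0.75, 0.25]
  >>> sum(team_scores) / len(team_scores)
  0.5

The implementing method follows in the second changed file.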
@@ -109,3 +109,19 @@ class Response(object):
                 wScore = relScore * len(qugroup.feedbackItems) - 0.00001
                 result.append((qugroup, qugroup.feedbackItems[int(wScore)], relScore))
         return result
+
+    def getTeamResult(self, teamData):
+        mine = self.getGroupedResult()
+        all = [d.getGroupedResult() for d in teamData]
+        averages = []
+        ranks = []
+        for idx, qgdata in enumerate(mine):
+            total = 0.0
+            pos = len(teamData)
+            for j, data in enumerate(all):
+                total += data[idx][2]
+                if qgdata[2] >= data[idx][2]:
+                    pos = len(teamData) - j
+            ranks.append(pos)
+            averages.append(total / len(teamData))
+        return mine, ranks, averages
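For reference, here is a standalone, commented sketch of the same aggregation over plain per-group score lists. The function name and the scores are hypothetical, not part of the commit:

  def team_result(my_scores, team_scores):
      # my_scores: one relative score per question group for the caller.
      # team_scores: one list of per-group scores per team member
      # (caller included), all in the same group order.
      ranks, averages = [], []
      for idx, mine in enumerate(my_scores):
          group = [member[idx] for member in team_scores]
          averages.append(sum(group) / len(group))
          # Mirrors the committed loop: the last team member whose
          # score does not exceed the caller's determines the rank.
          pos = len(team_scores)
          for j, score in enumerate(group):
              if mine >= score:
                  pos = len(team_scores) - j
          ranks.append(pos)
      return ranks, averages

  >>> team_result([0.75], [[0.75], [0.25]])
  ([1], [0.5])

Note that the rank computed this way depends on the order of the team list: the last comparison wins, so reordering the members can change the result. The code appears to assume the team results arrive sorted ascending by score, in which case the rank works out to one plus the number of strictly higher scores.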