
Commit 0a27bb1

Merge pull request #12 from CC-RMD-EpiBio/initial
Initial
2 parents 0418bf0 + a63b33e commit 0a27bb1

5 files changed: +82 -28 lines changed


libfabulouscatpy/cat/itemselection.py

Lines changed: 3 additions & 3 deletions
@@ -292,10 +292,10 @@ def _next_scored_item(
 
         criterion = self.criterion(scoring=self.scoring, items = un_items, scale=scale)
 
-        variance = list(criterion.values())
+        criterion = list(criterion.values())
 
-        variance /= np.max(variance)
-        probs = np.exp(-variance / self.temperature)
+        criterion -= np.max(criterion)
+        probs = np.exp(criterion / self.temperature)
         probs /= np.sum(probs)
 
         if self.deterministic:
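
The change above swaps the variance-specific softmax for a generic one over the selection criterion, subtracting the maximum before exponentiating so np.exp cannot overflow for large scores. A minimal, self-contained sketch of that pattern follows; the helper name softmax_select and its arguments are illustrative, not part of the library API.

import numpy as np

def softmax_select(criterion: dict[str, float], temperature: float = 1.0,
                   deterministic: bool = False, rng=None) -> str:
    """Pick an item ID from a {item_id: score} dict via a temperature softmax.

    Mirrors the pattern in the updated _next_scored_item: subtracting the
    maximum before exponentiating leaves the probabilities unchanged but
    keeps np.exp from overflowing.
    """
    rng = np.random.default_rng() if rng is None else rng
    ids = list(criterion.keys())
    scores = np.asarray(list(criterion.values()), dtype=float)
    scores -= np.max(scores)                    # shift for numerical stability
    probs = np.exp(scores / temperature)
    probs /= np.sum(probs)
    ndx = int(np.argmax(probs)) if deterministic else rng.choice(len(ids), p=probs)
    return ids[ndx]

# e.g. softmax_select({"itm_a": -1.2, "itm_b": -0.4, "itm_c": -0.9}, temperature=0.5)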

libfabulouscatpy/cat/itemselectors/fisher.py

Lines changed: 1 addition & 3 deletions
@@ -88,9 +88,7 @@ def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) ->
 
         return fish_scored
 
-
-
-class StochasticFisherItemSelector(ItemSelector):
+class StochasticFisherItemSelector(FisherItemSelector):
 
     description = """Selection based on Fisher information"""
 
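
Rebasing StochasticFisherItemSelector on FisherItemSelector means the stochastic selector inherits the Fisher-information criterion and only changes how an item is drawn from it. A hedged sketch of that design, with simplified stand-in class names rather than the real API:

import numpy as np

class FisherLikeSelector:
    def criterion(self, items: dict[str, float]) -> dict[str, float]:
        # placeholder for the real Fisher-information computation
        return {k: abs(v) for k, v in items.items()}

    def next_item(self, items: dict[str, float]) -> str:
        scores = self.criterion(items)
        return max(scores, key=scores.get)          # deterministic argmax

class StochasticFisherLikeSelector(FisherLikeSelector):
    def __init__(self, temperature: float = 1.0):
        self.temperature = temperature

    def next_item(self, items: dict[str, float]) -> str:
        scores = self.criterion(items)              # inherited, not re-implemented
        ids = list(scores)
        s = np.asarray([scores[i] for i in ids], dtype=float)
        s -= s.max()                                # stable softmax, as above
        p = np.exp(s / self.temperature)
        p /= p.sum()
        return ids[int(np.random.default_rng().choice(len(ids), p=p))]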

libfabulouscatpy/cat/itemselectors/globalinfo.py

Lines changed: 61 additions & 14 deletions
@@ -10,32 +10,32 @@
 # of Health and Human Services, which is making the software available to the
 # public for any commercial or non-commercial purpose under the following
 # open-source BSD license.
-# 
+#
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
-# 
+#
 # (1) Redistributions of source code must retain this copyright
 # notice, this list of conditions and the following disclaimer.
-# 
+#
 # (2) Redistributions in binary form must reproduce this copyright
 # notice, this list of conditions and the following disclaimer in the
 # documentation and/or other materials provided with the distribution.
-# 
+#
 # (3) Neither the names of the National Institutes of Health Clinical
 # Center, the National Institutes of Health, the U.S. Department of
 # Health and Human Services, nor the names of any of the software
 # developers may be used to endorse or promote products derived from
 # this software without specific prior written permission.
-# 
+#
 # (4) Please acknowledge NIHCC as the source of this software by including
 # the phrase "Courtesy of the U.S. National Institutes of Health Clinical
 # Center"or "Source: U.S. National Institutes of Health Clinical Center."
-# 
+#
 # THIS SOFTWARE IS PROVIDED BY THE U.S. GOVERNMENT AND CONTRIBUTORS "AS
 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 # PARTICULAR PURPOSE ARE DISCLAIMED.
-# 
+#
 # You are under no obligation whatsoever to provide any bug fixes,
 # patches, or upgrades to the features, functionality or performance of
 # the source code ("Enhancements") to anyone; however, if you choose to
@@ -54,6 +54,7 @@
 import numpy as np
 
 from libfabulouscatpy.cat.itemselection import ItemSelector
+from libfabulouscatpy.cat.session import CatSessionTracker
 from libfabulouscatpy.irt.scoring import BayesianScoring
 
 
@@ -67,14 +68,14 @@ def __init__(self, scoring, deterministic=True, hybrid=False, **kwargs):
         self.hybrid = hybrid
         self.deterministic = deterministic
 
-    def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) -> dict[str: Any]:
-
+    def criterion(
+        self, scoring: BayesianScoring, items: list[dict], scale=None
+    ) -> dict[str:Any]:
         """
         Parameters: session: instance of CatSession
         Returns: item dictionary entry or None
         """
 
-
         unresponded = [i for i in items if "scales" in i.keys()]
         in_scale = [i for i in unresponded if scale in i["scales"].keys()]
 
@@ -105,12 +106,58 @@ def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) ->
         )[
             :, unresponded_ndx, :
         ] #
-
+
         p_itemized = np.exp(lp_itemized)
         pi_density = scoring.scores[scale].density
 
-        criterion = np.sum(p_itemized*(lp_itemized - lp_point)*pi_density[:, np.newaxis, np.newaxis], axis=0)
+        criterion = np.trapz(
+            p_itemized
+            * (lp_itemized - lp_point)
+            * pi_density[:, np.newaxis, np.newaxis],
+            x=scoring.interpolation_pts[scale],
+            axis=0,
+        )
         criterion = np.sum(criterion, axis=-1)
-        criterion = dict(zip([x['item'] for x in items], criterion))
+        criterion = dict(zip([x["item"] for x in items], criterion))
         return criterion
-
+
+    def _next_scored_item(
+        self, tracker: CatSessionTracker, scale=None
+    ) -> dict[str : dict[str:Any]]:
+
+        scale = self.next_scale(tracker)
+        un_items = self.un_items(tracker, scale)
+
+        if un_items is None:
+            # Not sure if this can happen under normal testing, but included as
+            # a safety feature.
+            return None
+
+        trait = tracker.scores[scale]
+        trait = 0.0 if trait is None else trait
+        error = tracker.errors[scale]
+        error = 100.0 if error is None else error
+
+        criterion = self.criterion(scoring=self.scoring, items=un_items, scale=scale)
+        valid_items = [x["item"] for x in un_items]
+        items = []
+        Delta = []
+        for k, v in criterion.items():
+            if k in valid_items:
+                items += [k]
+                Delta += [v]
+        if len(items) == 0:
+            return {}
+        Delta -= np.max(Delta)
+        probs = np.exp(Delta / self.temperature)
+        probs /= np.sum(probs)
+
+        if self.deterministic or (self.hybrid and ((self.scoring.n_scored[scale] > 3))):
+            ndx = np.argmax(probs)
+        else:
+            ndx = np.random.choice(np.arange(len(criterion.keys())), p=probs)
+        result = list(criterion.keys())[ndx]
+        for i in un_items:
+            if i["item"] == result:
+                return i
+        return {}
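
The main change in criterion replaces a plain np.sum over the trait grid with np.trapz against scoring.interpolation_pts, so the result approximates an integral of the weighted log-probability terms over the posterior density rather than a grid-size-dependent sum. A toy sketch of that distinction, with an assumed 1-D grid and Gaussian density standing in for the real scoring objects:

# A minimal sketch of the np.sum -> np.trapz switch, assuming the quantity being
# averaged lives on a 1-D latent-trait grid `theta` with posterior density `pi`.
import numpy as np

theta = np.linspace(-4.0, 4.0, 161)          # interpolation grid (assumed shape)
pi = np.exp(-0.5 * theta**2)                 # unnormalised posterior density
pi /= np.trapz(pi, x=theta)                  # normalise so it integrates to 1

g = theta**2                                 # stand-in for the p*(lp - lp_point) terms

# Plain summation ignores the grid spacing, so its scale depends on the number
# of grid points; the trapezoidal rule approximates the actual integral
# E[g] = integral of g(theta) * pi(theta) d(theta), which is what the updated
# criterion computes.
riemann_like = np.sum(g * pi)                # grid-size dependent
expectation = np.trapz(g * pi, x=theta)      # close to 1.0 for this toy example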

libfabulouscatpy/cat/itemselectors/kl.py

Lines changed: 11 additions & 5 deletions
@@ -104,15 +104,19 @@ def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) ->
         p_itemized = np.exp(lp_itemized)
         pi_density = scoring.scores[scale].density
 
-        lp_infty = lp_itemized + energy[:, np.newaxis, np.newaxis]
+        # lp_infty = lp_itemized + energy[:, np.newaxis, np.newaxis]
+        lp_infty = 0.5*lp_itemized + energy[:, np.newaxis, np.newaxis] * (lp_itemized.shape[1] - 1)/2
         # N_grid x N_item x K
         expected_lp_infty = np.sum(lp_infty*p_itemized, axis=-1, keepdims=True)
         expected_lp_infty = np.sum(expected_lp_infty, axis=-2, keepdims=True)
-        # N_grid x N_item x 1
+        # shift it so that it is closer to the running estimate
+
+
+        # N_grid x 1 x 1
         pi_infty = np.exp(expected_lp_infty - np.max(expected_lp_infty, axis=0, keepdims=True))
         pi_infty /= np.trapz(
             y=pi_infty, x=scoring.interpolation_pts[scale], axis=0
-        ) # N_grid x N_item x 1
+        ) # N_grid x 1 x 1
         ##########
         # $\pi_\infty$ is computed
         ########
@@ -223,15 +227,17 @@ def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) ->
         p_itemized = np.exp(lp_itemized)
         pi_density = scoring.scores[scale].density
 
-        lp_infty = lp_itemized + energy[:, np.newaxis, np.newaxis]
+        # lp_infty = lp_itemized + energy[:, np.newaxis, np.newaxis]
+        lp_infty = 0.5*lp_itemized + energy[:, np.newaxis, np.newaxis] * (lp_itemized.shape[1] - 1)/2
+
         # N_grid x N_item x K
         expected_lp_infty = np.sum(lp_infty*p_itemized, axis=-1, keepdims=True)
         # N_grid x N_item x 1
         expected_lp_infty = np.sum(expected_lp_infty, axis=-2, keepdims=True)
         pi_infty = np.exp(expected_lp_infty - np.max(expected_lp_infty, axis=0, keepdims=True))
         pi_infty /= np.trapz(
             y=pi_infty, x=scoring.interpolation_pts[scale], axis=0
-        ) # N_grid x N_item x 1
+        ) # N_grid x 1 x 1
         ##########
         # $\pi_\infty$ is computed
         ########
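
Both hunks keep the same normalisation idiom for pi_infty and correct the shape comment: after the two keepdims sums the array is N_grid x 1 x 1, not N_grid x N_item x 1. The idiom is to shift the expected log-density by its maximum before exponentiating, then divide by its trapezoidal integral so it becomes a proper density on the trait grid. A small sketch of that idiom under assumed toy shapes:

import numpy as np

n_grid = 161
theta = np.linspace(-4.0, 4.0, n_grid)                 # trait grid (assumed)

expected_lp = -0.5 * theta**2                          # toy expected log-density
expected_lp = expected_lp[:, None, None]               # N_grid x 1 x 1

# max-shift for stability, then normalise along the grid axis by trapz
pi_infty = np.exp(expected_lp - np.max(expected_lp, axis=0, keepdims=True))
pi_infty /= np.trapz(y=pi_infty, x=theta, axis=0)

assert pi_infty.shape == (n_grid, 1, 1)
assert np.isclose(np.trapz(pi_infty[:, 0, 0], x=theta), 1.0)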

libfabulouscatpy/cat/itemselectors/variance.py

Lines changed: 6 additions & 3 deletions
@@ -38,13 +38,16 @@ def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) ->
 
         # previously observed
         energy = self.scoring.log_like[scale] + self.scoring.log_prior[scale]
+        pi_now = scoring.scores[scale].density
 
         ###
 
         #######
         # Future
 
         lp_infty = log_ell + energy[:, np.newaxis, np.newaxis]
+        p_now = np.exp(log_ell)
+        p_now = np.sum(pi_now[:, np.newaxis, np.newaxis]*p_now, axis=0)
         pi_infty = np.exp(lp_infty - np.max(lp_infty, axis=0, keepdims=True))
         pi_infty /= np.trapz(
             y=pi_infty, x=self.scoring.interpolation_pts[scale], axis=0
@@ -62,9 +65,9 @@ def criterion(self, scoring: BayesianScoring, items: list[dict], scale=None) ->
             x=self.scoring.interpolation_pts[scale],
             axis=0,
         )
+        mean = np.sum(mean*p_now, axis=-1)
+        second = np.sum(second*p_now, axis=-1)
         variance = second - mean**2
-        variance = np.sum(variance * np.exp(log_ell) * pi_infty, axis=-1)
-        variance = np.sum(variance, axis=0)
 
         criterion = dict(zip([x['item'] for x in items], variance))
         return criterion
@@ -93,7 +96,7 @@ def _next_scored_item(
         variance = list(criterion.values())
 
         variance /= np.max(variance)
-        probs = np.exp(-variance) ** (1 / self.temperature)
+        probs = np.exp(-variance / self.temperature)
         probs /= np.sum(probs)
 
         if self.deterministic:
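
The new lines weight the first and second posterior moments by p_now, the predictive probability of each response under the current posterior, and only then form second - mean**2. A hedged, self-contained sketch of that computation for a single two-category item follows; the grid, the response model, and the way mean and second are built here are toy assumptions inferred from the surrounding context, not taken from the file.

import numpy as np

theta = np.linspace(-4.0, 4.0, 161)                  # trait grid (assumed)
pi_now = np.exp(-0.5 * theta**2)
pi_now /= np.trapz(pi_now, x=theta)                  # current posterior density

# toy 2-category response model for one item
p_correct = 1.0 / (1.0 + np.exp(-theta))
like = np.stack([1.0 - p_correct, p_correct], axis=-1)          # N_grid x K

# predictive probability of each response under the current posterior
p_now = np.trapz(like * pi_now[:, None], x=theta, axis=0)       # shape (K,)

# candidate posterior after each hypothetical response, one column per category
post = like * pi_now[:, None]
post /= np.trapz(post, x=theta, axis=0)

mean = np.trapz(theta[:, None] * post, x=theta, axis=0)         # E[theta | resp]
second = np.trapz(theta[:, None] ** 2 * post, x=theta, axis=0)  # E[theta^2 | resp]

mean = np.sum(mean * p_now, axis=-1)                            # average moments over responses
second = np.sum(second * p_now, axis=-1)
criterion_value = second - mean**2                              # one entry of the criterion dict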

0 commit comments
