Merge branch 'dev' of https://github.com/theislab/batchglm into dev
davidsebfischer committed Feb 21, 2020
2 parents e699985 + 5707d0a commit 9a7d92b
Showing 4 changed files with 9 additions and 29 deletions.
batchglm/models/base_glm/input.py (2 changes: 1 addition & 1 deletion)

@@ -197,4 +197,4 @@ def fetch_design_scale(self, idx):
         return self.design_scale[idx, :]
 
     def fetch_size_factors(self, idx):
-        return self.size_factors[idx]
+        return self.size_factors[idx, :]
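The trailing slice makes the size-factor fetch return a two-dimensional (observations x features) slice, mirroring fetch_design_scale above. For a 2-D numpy array the two spellings are equivalent, so the change reads as a consistency fix that pairs with the estimator changes below, where the per-estimator broadcasting of size factors is removed. A minimal sketch of the indexing semantics (shapes are illustrative assumptions, not taken from the repository):

```python
import numpy as np

# Assumed layout: one factor per observation, replicated across
# features, e.g. 4 observations x 3 features.
size_factors = np.broadcast_to(
    np.array([[1.0], [0.5], [2.0], [1.5]]), (4, 3)
)

idx = [0, 2]
# For a 2-D array both spellings select the same block ...
assert np.array_equal(size_factors[idx], size_factors[idx, :])
# ... but the explicit [idx, :] documents the 2-D return shape.
print(size_factors[idx, :].shape)  # (2, 3)
```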
batchglm/train/tf1/glm_beta/estimator.py (9 changes: 3 additions & 6 deletions)

@@ -169,8 +169,6 @@ def init_par(
         $$
         """
 
-        size_factors_init = input_data.size_factors
-
         if init_model is None:
             groupwise_means = None
             init_a_str = None
@@ -185,7 +183,7 @@
                     x=input_data.x,
                     design_loc=input_data.design_loc,
                     constraints_loc=input_data.constraints_loc,
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     link_fn=lambda mean: np.log(
                         1/(1/self.np_clip_param(mean, "mean")-1)
                     )
@@ -221,7 +219,7 @@
                     x=input_data.x,
                     design_scale=input_data.design_scale[:, [0]],
                     constraints=input_data.constraints_scale[[0], :][:, [0]],
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     groupwise_means=None,
                     link_fn=lambda samplesize: np.log(self.np_clip_param(samplesize, "samplesize"))
                 )
@@ -248,7 +246,7 @@
                     x=input_data.x,
                     design_scale=input_data.design_scale,
                     constraints=input_data.constraints_scale,
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     groupwise_means=groupwise_means,
                     link_fn=lambda samplesize: np.log(self.np_clip_param(samplesize, "samplesize"))
                 )
@@ -291,4 +289,3 @@ def init_par(
         logging.getLogger("batchglm").debug("Using initialization based on input model for dispersion")
 
         return init_a, init_b
-
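A side note on the beta hunks above: the location link function written as np.log(1/(1/mean - 1)) is algebraically the logit, log(mean / (1 - mean)), which maps a mean constrained to (0, 1) onto the whole real line. A quick illustrative check:

```python
import numpy as np

mean = np.array([0.1, 0.5, 0.9])

link_as_written = np.log(1 / (1 / mean - 1))  # form used in the diff
logit = np.log(mean / (1 - mean))             # equivalent closed form

assert np.allclose(link_as_written, logit)
```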
batchglm/train/tf1/glm_nb/estimator.py (14 changes: 3 additions & 11 deletions)

@@ -176,14 +176,6 @@ def init_par(
         $$
         """
 
-        size_factors_init = input_data.size_factors
-        if size_factors_init is not None:
-            size_factors_init = np.expand_dims(size_factors_init, axis=1)
-            size_factors_init = np.broadcast_to(
-                array=size_factors_init,
-                shape=[input_data.num_observations, input_data.num_features]
-            )
-
         if init_model is None:
             groupwise_means = None
             init_a_str = None
@@ -198,7 +190,7 @@
                     x=input_data.x,
                     design_loc=input_data.design_loc,
                     constraints_loc=input_data.constraints_loc,
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     link_fn=lambda mu: np.log(self.np_clip_param(mu, "mu"))
                 )
 
@@ -239,7 +231,7 @@
                     x=input_data.x,
                     design_scale=input_data.design_scale[:, [0]],
                     constraints=input_data.constraints_scale[[0], :][:, [0]],
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     groupwise_means=None,
                     link_fn=lambda r: np.log(self.np_clip_param(r, "r"))
                 )
@@ -267,7 +259,7 @@
                     x=input_data.x,
                     design_scale=input_data.design_scale,
                     constraints=input_data.constraints_scale,
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     groupwise_means=groupwise_means,
                     link_fn=lambda r: np.log(self.np_clip_param(r, "r"))
                 )
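The block deleted here lifted a per-observation size-factor vector to a full (num_observations x num_features) matrix before handing it to the closed-form initializers; the commit now passes input_data.size_factors straight through, which is consistent with the input.py change above if size factors are already stored two-dimensionally. A sketch of the removed broadcasting pattern (shapes assumed for illustration):

```python
import numpy as np

num_observations, num_features = 5, 3
sf = np.random.uniform(0.5, 2.0, size=num_observations)  # 1-D, per observation

# Pattern removed by this commit: lift the vector to a column,
# then replicate it across the feature axis without copying.
sf_2d = np.expand_dims(sf, axis=1)                                # (5, 1)
sf_2d = np.broadcast_to(sf_2d, (num_observations, num_features))  # (5, 3)

assert sf_2d.shape == (num_observations, num_features)
assert np.array_equal(sf_2d[:, 0], sf)  # every column repeats the vector
```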
batchglm/train/tf1/glm_norm/estimator.py (13 changes: 2 additions & 11 deletions)

@@ -172,14 +172,6 @@ def init_par(
         $$
         """
 
-        size_factors_init = input_data.size_factors
-        if size_factors_init is not None:
-            size_factors_init = np.expand_dims(size_factors_init, axis=1)
-            size_factors_init = np.broadcast_to(
-                array=size_factors_init,
-                shape=[input_data.num_observations, input_data.num_features]
-            )
-
         sf_given = False
         if input_data.size_factors is not None:
             if np.any(np.abs(input_data.size_factors - 1.) > 1e-8):
@@ -268,7 +260,7 @@
                     x=input_data.x,
                     design_scale=input_data.design_scale,
                     constraints=input_data.constraints_scale,
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     groupwise_means=groupwise_means,
                     link_fn=lambda sd: np.log(self.np_clip_param(sd, "sd"))
                 )
@@ -282,7 +274,7 @@
                     x=input_data.x,
                     design_scale=input_data.design_scale[:, [0]],
                     constraints=input_data.constraints_scale[[0], :][:, [0]],
-                    size_factors=size_factors_init,
+                    size_factors=input_data.size_factors,
                     groupwise_means=None,
                     link_fn=lambda sd: np.log(self.np_clip_param(sd, "sd"))
                 )
@@ -331,4 +323,3 @@
         logger.debug("Using initialization based on input model for dispersion")
 
         return init_a, init_b
-
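The glm_norm initializer additionally tracks an sf_given flag, visible in the context lines of its first hunk: size factors only count as given when at least one of them differs from 1 beyond a small tolerance. A hedged sketch of that check (the helper name is hypothetical, not from the repository):

```python
import numpy as np

def size_factors_informative(size_factors, tol=1e-8):
    """Mirror of the sf_given check in the diff context above."""
    if size_factors is None:
        return False
    return bool(np.any(np.abs(size_factors - 1.0) > tol))

assert not size_factors_informative(None)
assert not size_factors_informative(np.ones((4, 2)))
assert size_factors_informative(np.array([1.0, 1.2, 0.9]))
```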