From c4b74d826ab21797351724ca85e211ac4789895a Mon Sep 17 00:00:00 2001 From: Derek Ogle Date: Mon, 19 Dec 2022 10:26:46 -0600 Subject: [PATCH 1/8] Many initial updates related to changes to fishR --- DESCRIPTION | 14 +- NEWS.md | 448 +++++++----- R/BluegillJL.R | 2 +- R/BrookTroutTH.R | 2 +- R/CIDists.R | 6 +- R/ChinookArg.R | 2 +- R/CodNorwegian.R | 2 +- R/CutthroatAL.R | 2 +- R/Ecoli.R | 2 +- R/FSA.R | 6 +- R/FSAUtils.R | 58 +- R/Mirex.R | 2 +- R/PSDlit.R | 2 +- R/PikeNY.R | 2 +- R/PikeNYPartial1.R | 2 +- R/SMBassLS.R | 2 +- R/SMBassWB.R | 2 +- R/SpotVA1.R | 2 +- R/Summarize.R | 4 +- R/WR79.R | 2 +- R/WSlit.R | 2 +- R/WhitefishLC.R | 2 +- R/addZeroCatch.R | 6 +- R/ageComparisons.R | 14 +- R/alkIndivAge.R | 8 +- R/alkPlot.R | 4 +- R/alkSummaries.R | 10 +- R/bootstrap.R | 4 +- R/capHistConvert.R | 4 +- R/capHistSum.R | 4 +- R/catchCurve.R | 6 +- R/chapmanRobson.R | 6 +- R/depletion.R | 6 +- R/dunnTest.R | 4 +- R/expandCounts.R | 2 +- R/expandLenFreq.R | 2 +- R/extraTests.R | 4 +- R/growthModels.R | 8 +- R/hist.formula.R | 4 +- R/histFromSum.R | 2 +- R/knitUtil.R | 2 +- R/ksTest.R | 2 +- R/lencat.R | 4 +- R/lwCompPreds.R | 4 +- R/metaM.R | 4 +- R/mrClosed.R | 4 +- R/mrOpen.R | 4 +- R/nlsTracePlot.R | 2 +- R/psdAdd.R | 4 +- R/psdCI.R | 4 +- R/psdCalc.R | 4 +- R/psdPlot.R | 6 +- R/psdVal.R | 6 +- R/removal.R | 4 +- R/srStarts.R | 6 +- R/stockRecruitment.R | 4 +- R/sumTable.R | 2 +- R/tictactoe.R | 4 +- R/vbStarts.R | 6 +- R/wrAdd.R | 6 +- R/wsVal.R | 4 +- README.md | 36 +- _pkgdown.yml | 13 +- docs/LICENSE-text.html | 91 ++- docs/authors.html | 109 ++- docs/index.html | 144 ++-- docs/news/index.html | 773 +++++++++++++------- docs/pkgdown.js | 172 +++-- docs/pkgdown.yml | 6 +- docs/reference/BluegillJL.html | 126 ++-- docs/reference/BrookTroutTH.html | 128 ++-- docs/reference/ChinookArg.html | 136 ++-- docs/reference/CodNorwegian.html | 134 ++-- docs/reference/CutthroatAL.html | 132 ++-- docs/reference/Ecoli.html | 112 ++- docs/reference/FSA-internals.html | 102 ++- 
docs/reference/FSA.html | 110 ++- docs/reference/Mirex.html | 128 ++-- docs/reference/PSDlit.html | 134 ++-- docs/reference/PikeNY.html | 126 ++-- docs/reference/PikeNYPartial1.html | 126 ++-- docs/reference/SMBassLS.html | 126 ++-- docs/reference/SMBassWB.html | 130 ++-- docs/reference/Schnute.html | 206 +++--- docs/reference/SpotVA1.html | 134 ++-- docs/reference/Summarize.html | 280 ++++---- docs/reference/WR79.html | 142 ++-- docs/reference/WSlit.html | 138 ++-- docs/reference/WhitefishLC.html | 126 ++-- docs/reference/addZeroCatch.html | 318 +++++---- docs/reference/ageBias.html | 606 +++++++++------- docs/reference/agePrecision.html | 274 +++---- docs/reference/alkAgeDist.html | 186 ++--- docs/reference/alkIndivAge.html | 348 ++++----- docs/reference/alkMeanVar.html | 206 +++--- docs/reference/alkPlot.html | 242 ++++--- docs/reference/binCI.html | 194 ++--- docs/reference/capHistConvert.html | 532 +++++++------- docs/reference/capHistSum.html | 216 +++--- docs/reference/catchCurve.html | 388 +++++----- docs/reference/chapmanRobson.html | 330 +++++---- docs/reference/col2rgbt.html | 148 ++-- docs/reference/depletion.html | 352 +++++---- docs/reference/dunnTest.html | 260 +++---- docs/reference/expandCounts.html | 436 ++++++------ docs/reference/expandLenFreq.html | 226 +++--- docs/reference/extraTests.html | 256 +++---- docs/reference/fact2num.html | 154 ++-- docs/reference/fishR.html | 157 ++-- docs/reference/geomean.html | 172 ++--- docs/reference/growthModels.html | 668 ++++++++--------- docs/reference/headtail.html | 186 ++--- docs/reference/hist.formula.html | 258 ++++--- docs/reference/histFromSum.html | 228 +++--- docs/reference/hyperCI.html | 144 ++-- docs/reference/index.html | 1065 ++++++++++++++++------------ docs/reference/knitUtil.html | 258 ++++--- docs/reference/ksTest.html | 226 +++--- docs/reference/lagratio.html | 190 ++--- docs/reference/lencat.html | 484 +++++++------ docs/reference/logbtcf.html | 182 ++--- docs/reference/lwCompPreds.html | 
266 ++++--- docs/reference/metaM.html | 282 ++++---- docs/reference/mrClosed.html | 466 ++++++------ docs/reference/mrOpen.html | 300 ++++---- docs/reference/nlsBoot.html | 264 ++++--- docs/reference/nlsTracePlot.html | 246 ++++--- docs/reference/perc.html | 180 ++--- docs/reference/plotAB.html | 272 ++++--- docs/reference/poiCI.html | 178 ++--- docs/reference/psdAdd.html | 296 ++++---- docs/reference/psdCI.html | 244 ++++--- docs/reference/psdCalc.html | 280 ++++---- docs/reference/psdPlot.html | 264 ++++--- docs/reference/psdVal.html | 206 +++--- docs/reference/rSquared.html | 164 ++--- docs/reference/rcumsum.html | 216 +++--- docs/reference/removal.html | 440 ++++++------ docs/reference/se.html | 158 +++-- docs/reference/srStarts.html | 238 ++++--- docs/reference/sumTable.html | 190 ++--- docs/reference/tictactoe.html | 244 ++++--- docs/reference/validn.html | 162 ++--- docs/reference/vbStarts.html | 342 +++++---- docs/reference/wrAdd.html | 224 +++--- docs/reference/wsVal.html | 180 ++--- man/BluegillJL.Rd | 2 +- man/BrookTroutTH.Rd | 2 +- man/ChinookArg.Rd | 2 +- man/CodNorwegian.Rd | 2 +- man/CutthroatAL.Rd | 2 +- man/Ecoli.Rd | 2 +- man/FSA.Rd | 4 +- man/Mirex.Rd | 2 +- man/PSDlit.Rd | 2 +- man/PikeNY.Rd | 2 +- man/PikeNYPartial1.Rd | 2 +- man/SMBassLS.Rd | 2 +- man/SMBassWB.Rd | 2 +- man/Schnute.Rd | 2 +- man/SchnuteRichards.Rd | 2 +- man/SpotVA1.Rd | 2 +- man/Summarize.Rd | 2 +- man/WR79.Rd | 2 +- man/WSlit.Rd | 2 +- man/WhitefishLC.Rd | 2 +- man/addZeroCatch.Rd | 4 +- man/ageBias.Rd | 8 +- man/agePrecision.Rd | 8 +- man/alkAgeDist.Rd | 4 +- man/alkIndivAge.Rd | 6 +- man/alkMeanVar.Rd | 4 +- man/alkPlot.Rd | 4 +- man/binCI.Rd | 2 +- man/boot.Rd | 2 +- man/capFirst.Rd | 2 +- man/capHistConvert.Rd | 4 +- man/capHistSum.Rd | 4 +- man/catchCurve.Rd | 4 +- man/chapmanRobson.Rd | 4 +- man/col2rgbt.Rd | 2 +- man/depletion.Rd | 4 +- man/dunnTest.Rd | 2 +- man/expandCounts.Rd | 2 +- man/expandLenFreq.Rd | 2 +- man/extraTests.Rd | 2 +- man/fact2num.Rd | 2 +- 
man/fishR.Rd | 13 +- man/growthModels.Rd | 4 +- man/headtail.Rd | 2 +- man/hist.formula.Rd | 4 +- man/histFromSum.Rd | 2 +- man/hyperCI.Rd | 2 +- man/is.odd.Rd | 2 +- man/knitUtil.Rd | 2 +- man/ksTest.Rd | 2 +- man/lagratio.Rd | 2 +- man/lencat.Rd | 4 +- man/logbtcf.Rd | 2 +- man/lwCompPreds.Rd | 4 +- man/metaM.Rd | 4 +- man/mrClosed.Rd | 4 +- man/mrOpen.Rd | 4 +- man/nlsBoot.Rd | 2 +- man/nlsTracePlot.Rd | 2 +- man/peek.Rd | 2 +- man/perc.Rd | 2 +- man/plotAB.Rd | 2 +- man/poiCI.Rd | 2 +- man/psdAdd.Rd | 4 +- man/psdCI.Rd | 4 +- man/psdCalc.Rd | 4 +- man/psdPlot.Rd | 4 +- man/psdVal.Rd | 4 +- man/rcumsum.Rd | 2 +- man/removal.Rd | 4 +- man/repeatedRows2Keep.Rd | 2 +- man/se.Rd | 2 +- man/srFuns.Rd | 4 +- man/srStarts.Rd | 4 +- man/sumTable.Rd | 2 +- man/tictactoe.Rd | 4 +- man/validn.Rd | 2 +- man/vbStarts.Rd | 4 +- man/wrAdd.Rd | 4 +- man/wsVal.Rd | 4 +- tests/testthat/testthat_FSAUtils.R | 22 +- 227 files changed, 11453 insertions(+), 9878 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index a69f406a..c6ab2510 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -4,11 +4,15 @@ Date: 2022-2-15 Title: Simple Fisheries Stock Assessment Methods Description: A variety of simple fish stock assessment methods. 
Authors@R: c( - person("Derek", "Ogle", email="derek@derekogle.com", role=c("aut","cre"), + person("Derek", "Ogle", email="DerekOgle51@gmail.com", + role=c("aut","cre"), comment=c(ORCID="0000-0002-0370-9299")), - person("Jason", "Doll", email="jason.doll@fmarion.edu", role=c("aut")), - person("Powell", "Wheeler", email="powell.wheeler@gmail.com", role="aut"), - person("Alexis", "Dinno", email="alexis.dinno@pdx.edu", role="aut", + person("Jason", "Doll", email="jason.doll@fmarion.edu", + role=c("aut")), + person("Powell", "Wheeler", email="powell.wheeler@gmail.com", + role="aut"), + person("Alexis", "Dinno", email="alexis.dinno@pdx.edu", + role="aut", comment="Provided base functionality of dunnTest()") ) URL: https://fishr-core-team.github.io/FSA/ @@ -46,4 +50,4 @@ Suggests: tibble, covr Encoding: UTF-8 -RoxygenNote: 7.1.2 +RoxygenNote: 7.2.2 diff --git a/NEWS.md b/NEWS.md index 531231fd..655daea7 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,11 +1,20 @@ -# FSA 0.9.3.9000 ongoing -* Updated sticker. -* Updated `pkgdown.yaml` GitHub action to [v2](https://github.com/r-lib/actions/tree/v2-branch/examples#build-pkgdown-site). Changed action to only run on a release (rather than a push) but it can be [run manually](https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow) as well. -* Updated `R-CMD-check.yaml` GitHub action to [v2](https://github.com/r-lib/actions/tree/v2-branch/examples#standard-ci-workflow). Note that I had to add the [extra code for dealing with graphics on the Mac version](https://github.com/r-lib/actions#common-questions). -* `alkIndivAge()`: Modified. Added a catch for `NA`s in the length sample. Also added a test. This addresses [#88](https://github.com/fishR-Core-Team/FSA/issues/88)). +# FSA 0.9.3.9000 +* Changes related to moving to fishR-Core-Team + * Updated sticker. + * Changed DHO e-mail address (in DESCRIPTION and in all author fields of the documentation). 
Partially address [#86](https://github.com/fishR-Core-Team/FSA/issues/86). + * Updated `pkgdown.yaml` GitHub action to [v2](https://github.com/r-lib/actions/tree/v2-branch/examples#build-pkgdown-site). Changed action to only run on a release (rather than a push) but it can be [run manually](https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow) as well. + * Updated `R-CMD-check.yaml` GitHub action to [v2](https://github.com/r-lib/actions/tree/v2-branch/examples#standard-ci-workflow). Note that I had to add the [extra code for dealing with graphics on the Mac version](https://github.com/r-lib/actions#common-questions). +* Changes related to new fishR webpage + * Updated links in `fishR()`, `FSA()`, and `README.md`. Partially address [#86](https://github.com/fishR-Core-Team/FSA/issues/86). + * Updated all links to Introductory Fisheries Analyses with R book. + * Added links to CSV files for all data sets. This addresses [#96](https://github.com/fishR-Core-Team/FSA/issues/96). + * Changed theme in `_pkgdown.yml` to match that of `FSAdata` and more closely match `fishR`. + * Removed most recent dates from NEWS file as `pkgdown` picks up the CRAN release date to add. + +* `alkIndivAge()`: Modified. Added a catch for `NA`s in the length sample. Also added a test. This addresses [#88](https://github.com/fishR-Core-Team/FSA/issues/88). * `confint.boot()`: Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by `grep()`ing for the `%` sign. This fixes an issue related to `car::Confint()` returning the `coef()` results for functions that have a `coef()` method but not for those that do not. Also updated tests to use results from `car::Boot()` rather than the old `car::bootCase()`. 
-# FSA 0.9.3 15-Feb-21 +# FSA 0.9.3 * Moved `dplyr` from `imports` to `suggests` (needed because functions were removed in last version; however it is still used in some examples; partially addresses [#87](https://github.com/fishR-Core-Team/FSA/issues/87)). * Removed `sciplot` from `imports` (because functions were removed in last version; partially addresses [#87](https://github.com/fishR-Core-Team/FSA/issues/87)). * Updated tests for `ksTest()` to handle issues on the CRAN M1 build machine (per e-mail from Prof. Ripley on 15-Feb-22; partially addresses [#87](https://github.com/fishR-Core-Team/FSA/issues/87)). @@ -24,10 +33,10 @@ * `WSLit`: Modified. Added info for Utah Chub (from [here](https://webpages.uidaho.edu/quistlab/publications/NAJFM_2021_Black_et_al_UTC_Ws_length_categories.pdf); address [#84](https://github.com/fishR-Core-Team/FSA/issues/84)). * `wsVal()`: Modified. Changed the way `WSlit` was loaded into the function environment so that `FSA::wsVal()` will work. Addresses [#85](https://github.com/fishR-Core-Team/FSA/issues/85). -# FSA 0.9.1 17-Jul-21 +# FSA 0.9.1 * Corrected testing issue for `catchCurve()` and `chapmanRobson()` as directed by CRAN on 17-Jul-21. Issue likely caused by changes to `fishmethods` package. -# FSA 0.9.0 8-Jun-21 +# FSA 0.9.0 * Make note of the several **removed** (now defunct) and **deprecated** (soon to be defunct) functions listed below. * Added Jason Doll as an `AUThor`. * Moved `plyr` from Imports to Suggests. @@ -70,7 +79,7 @@ * Added Striped Bass (Hybrid) and Striped Bass x White Bass; though these are the same as the existing Palmetto Bass. * `wsVal()`: Modified. Changed a `levels()` in `iwsLitCheck()` to `unique()` because `species` is no longer a factor due to updating `WSlit` (i.e., rdata file changed with new `read.csv()`). -# FSA 0.8.32 15-Jan-21 +# FSA 0.8.32 * Removed Travis-CI and appveyor. * No longer using coveralls for coverage statistics. Changed to codecov.io. 
* Added GitHub Action for CI/CD (used `usethis::use_github_action_check_standard()`). @@ -85,37 +94,40 @@ * `psdPlot()`: Modified. Fixed bug with box around the plot when `add.psd=FALSE`. Added 5% expansion to top of y-axis so that bars did not run into the box. * `residPlot()`: Modified. Fixed bugs with handling models that used character rather than factor variables. -# FSA 0.8.31 7-Nov20 +# FSA 0.8.31 * Now using roxygen v7.1.1. * Added `tibble` to suggests (see comment about `headtail()` below). * Cleaned up the documentation of parameters for `RichardsFuns()` (documentation did not change when parameter letters were changed for the Age and Growth book). * Changed example in `headtail()` to use `as_tibble()` from `tibble` package rather than `tbl_df()` from `dplyr` package. Required adding `tibble` to suggests. * `nlsTracePlot()`: Modified. Created a conditional catch depending on the version of R as the results of `nls(*,trace=TRUE)` are changing in v4.1.0 (per e-mail from Martin Maechler on 2-Nov-20). -# FSA 0.8.30 9-Mar-20 +# FSA 0.8.30 +* **Date:** 9-Mar-20 * Started using `rhub::check_for_cran()` for checking before sending to CRAN. * Updated tests for `Summarize()` and `ksTest()` that used `data.frame()`. This should have been done with v0.8.28. * Fixed errors for tests in `ksTest()` that were identified using R-hub. * Removed all links to documentation in non-dependent or non-suggested packages. This removes a note from R-hub. * `fishR()`: Modified. Changed base URL to `https:` (from `http:`). Added `open=`, primarily to allow not opening a browser during testing. -# FSA 0.8.29 8-Mar-20 +# FSA 0.8.29 +* **Date:** 8-Mar-20 * Removed dependency on `epitools` package as it may soon be orphaned. See changes to `binCI()` and `poiCI()` outlined below. * `binCI()`: Modified. Added internal functions that are based on (but not identical to) functions in the `epitools` package which will possibly be deprecated soon (per note from CRAN on 7-Mar-20). 
* `poiCI()`: Modified. Added internal functions that are based on (but not identical to) functions in the `epitools` package which will possibly be deprecated soon (per note from CRAN on 7-Mar-20). -# FSA 0.8.28 28-Feb-20 +# FSA 0.8.28 +* **Date:** 28-Feb-20 * `fitPlot()`: Modified. Changed so that lines are plotted after the points in the IVR versions. * `ksTest()`: Modified. Changed documentation examples to handle R's new way of handling `stringsAsFactors=` (per request from CRAN on 27-Feb-20). * `psdAdd()`: Modified. Changed testing to handle R's new way of handling `stringsAsFactors=` (per request from CRAN on 27-Feb-20). -# FSA 0.8.27 2-Feb-20 +# FSA 0.8.27 * Now using ROxygen2 7.0.2. * Removed dependency on `gplots` package as it is now orphaned. Required adding `iRichColors()` internal function. * `lwCompPreds()`: Removed `\dots` from arguments as it was not in usage (per request from CRAN on 3-Feb-20). * `repeatedRows2Keep()`: Modified. Now makes comparisons as if `NA`s are regular values. -# FSA 0.8.26 22-Nov-19 +# FSA 0.8.26 * Changed to depending on `R >=3.5.0`, because that is the latest version required by a package (i.e., `car`) that FSA imports or suggests. Used the "check_r_versions_of_package_dependencies" shiny app by "ateucher" (on Github) to help determine this. * Removed `asbio` package from suggests as it hung up Travis-CI build (because of the need for the TCLTK package). * `capFirst()`: Modified. Fixed bug related to an `NA` item. @@ -141,11 +153,11 @@ * `nlsTracePlot()`: Modified. Moved error catching for improper keyword for legend placement forward. * `SchnuteRichards()`: Added. This addresses [#54](https://github.com/fishR-Core-Team/FSA/issues/54). -# FSA 0.8.22 22-Nov-18 +# FSA 0.8.22 * Corrected CITATION file. * Updated tests for changes in the `fishmethods` package (`vblrt()` replaced with `growthlrt()` and `T=` replaced with `TC=` in `M.empirical()`) per CRAN request. -# FSA 0.8.21 2-Nov-18 +# FSA 0.8.21 * Added a webpage. 
Setup Travis-CI to handle updates ([See this](https://www.datacamp.com/community/tutorials/cd-package-docs-pkgdown-travis)). * Added a hex sticker logo. * Added `withr` to Imports (see usages below). @@ -175,8 +187,7 @@ * `SMBassWB`: Modified. Fixed minor data entry error in row 383. * `vbFuns()`: Modified. Added `Francis3` to the list of models. - -# FSA 0.8.20 18-May-18 +# FSA 0.8.20 * Added `asbio`, `DescTools`, `nlme`, and `psych` packages to Suggests because they are used in tests (and as will soon be required by CRAN ... per an e-mail from CRAN on 17-May-18). * Fixed a bunch of bad links to other packages in the documentation. * Removed the "Date" field from the Description file. @@ -186,7 +197,7 @@ * `mrClosed()`: Modified. Fixed a bug that was related to `poiCI()` returning results from all four types. Now `mrClosed()` will use only one type. Thanks to Timothy Spiers for pointing out this bug. * `SMBassWB`: Modified. Fixed minor data entry error in row 404. Changed link in documentation from `alr3` to `alr4` package. -# FSA 0.8.19 8-Apr-18 +# FSA 0.8.19 * `addZeroCatch()`: Modified. Changed two `1:nrow()` structures to `seq_len(nrow())` (partially addressing [#36](https://github.com/fishR-Core-Team/FSA/issues/36)). * `ageBias()`: Modified. Changed all `1:` structures to `seq_len()` (partially addressing [#36](https://github.com/fishR-Core-Team/FSA/issues/36)). * `agePrecision()`: Modified. Changed all `1:` structures to `seq_len()` (partially addressing [#36](https://github.com/fishR-Core-Team/FSA/issues/36)). @@ -213,7 +224,8 @@ * `Summarize()`: Modified. Changed one `1:length()` structure to `seq_along()` (partially addressing [#36](https://github.com/fishR-Core-Team/FSA/issues/36)). * `wrAdd()`: Modified. Changed three `1:` structures to `seq_len()` or `seq_along()` (partially addressing [#36](https://github.com/fishR-Core-Team/FSA/issues/36)). 
-# FSA 0.8.18 31-Mar-18 +# FSA 0.8.18 +* **Date:** 31-Mar-18 * Changed to depending on `R >=3.2.0`, because that is the latest version required by a package (i.e., `car`) that FSA imports or suggests. Used the "check_r_versions_of_package_dependencies" shiny app by "ateucher" (on Github) to help determine this. * Using latest `testthat` package. * `bootCase()`: Added. This was added because `bootCase()` will soon be removed from the `car` package. It was added so that the code in the Introductory Fisheries Analyses with R book will still work. It is largely a wrapper to `Boot()` in `car` with `method="case"`. The documentation was updated somewhat. @@ -229,27 +241,28 @@ * `vbFuns()`: Modified. Switched `Fabens` and `Fabens2` parameterizations to better match `Wang` (i.e., increment model first). Added `Francis2` parameterization for tag-recapture data. * `vbStarts()`: Modified. Fixed some spacing issues with the warnings when starting values for Linf was poorly estimated. Added an argument to `ivbStarts.LinfK()` to suppress checking the value of Linf. This argument reduces the change of double-printing the warning message when there are bad estimates of starting values for Linf and K. -# FSA 0.8.17 29-Oct-17 +# FSA 0.8.17 * `dunnTest()`: Modified. Adjusted code to handle the addition of `altp=` to and modified output from `dunn.test()` in `dunn.test`. Added additional tests and corrected some issues in the documentation. * `GompertzFuns()`: Modified. Fixed error in message (i.e., `msg=TRUE`) for `param="Ricker2"`. -# FSA 0.8.16 7-Sep-17 +# FSA 0.8.16 * Need to resubmit v0.8.15 to CRAN, so bumped the version. * `growthFunShow()`: Modified. Fixed error in expression for `type="Logistic"` and `param="CampanaJones1"`. -# FSA 0.8.15 6-Sep-17 +# FSA 0.8.15 +* **Date:** 6-Sep-17 * Added a script to the `helpers` directory that will test that all required packages are installed. * `iAddOutlierTestResults()`: Modified. 
Fixed bug related to point labels in `residPlot()` when the data.frame for the original model had `NA` values. * `removal()`: Modified document by merging pull request [#33](https://github.com/fishR-Core-Team/FSA/pull/33). * `srStarts()`: Modified. Added `fixed=`. Added some catches for poor starting values. Added relevant tests. Addresses [#30](https://github.com/fishR-Core-Team/FSA/issues/30). -# FSA 0.8.14 27-Jul-17 +# FSA 0.8.14 * Moved `dunn.test` and `lmtest` to `imports` to help with portability for workshops. * `ageBias()`: Modified. Fixed bug in `plot()` so that the tick marks on the marginal histograms match the tick marks on the main plot. Changed the default `hist.panel.size=` in `plot()` so that it more reliably prints the values on the axes of the marginal histograms. * `removal()`: Modified. Added "warnings" for when all catches are zeroes (an object is still returned with all `NA`s). Thanks to Daniel Hanks for pointing out this issue. * `Summarize()`: Modified. Fixed bug when `percZero!="always"` and there are no valid values such that the calculated percent of zeroes is `NA`. -# FSA 0.8.13 29-Apr-17 +# FSA 0.8.13 * `ageBias()`: Modified. A complete rebuild of `plot`. Major changes are to add `plotAB()` which is primarily used to make the "legacy" age bias plots of Campana, removal of the "sunflower" plot option, new sets of defaults for many of the arguments that reflect my preferences for visualizing age comparisons (which includes defaulting to plotting differences in ages), addition of the ability to add marginal histograms (`xHist=`, `yHist=`, `col.hist=`, and `hist.panel.size=`), better handling of axis ticks and labels (primarily to show ticks at integers and make sure 0 is included for differences), and allowing the ability to add "summary layers" to the main plot (see `allowAdd=`). Many examples were added. Some functionality from previous versions will be broken. * `capFirst()`: Modified. 
Changed some `if()`s with `class()`es to `inherits()`. * `compIntercepts()`: Modified. Replaced two `dim()` calls with `nrow()`. @@ -270,7 +283,7 @@ * `residPlot()`: Modified. Changed default for `loess=` from `TRUE` to `FALSE`. Changed some `if()`s with `class()`es to `inherits()`. * `wrAdd()`: Modified. Changed some `if()`s with `class()`es to `inherits()`. -# FSA 0.8.12 12-Mar-17 +# FSA 0.8.12 * Lots of spelling corrections after running `devtools::spell_check()`. * Cleaned up some issues in the testing files that were caused by a new version of `fishmethods` and changes to R v3.4.0. * `metaM()`: Modified. Changed `T=` to `Temp=` to reduce potential for conflicts with `TRUE` abbreviation. @@ -278,7 +291,7 @@ * `srStarts()`: Modified. Corrected mis-spelling in directive to `FSAsim` package. * `vbStarts()`: Modified. Added a catch that Linf cannot be automatically estimated with fewer than three ages. Corrected mis-spelling in directive to `FSAsim` package. -# FSA 0.8.11 13-Dec-16 +# FSA 0.8.11 * Changed all `stop()`s to `STOP()`s and all `warning()`s to `WARN()`. This modified nearly all functions. * Changed all `paste()`s that used `sep=""` to `paste0()`s. * Removed several `sep=""`s from `message()`s. @@ -321,7 +334,7 @@ * `vbFuns()`: Modified. Added `Ogle` to list of parameterizations. Changed order of `L0` and `K` parameters in returned function when `param="Original"`. * `vbStarts()`: Modified. Added the `methLinf=` argument that allows the user to choose if Linf is estimated from a Walford plot (`methLinf="Walford"`; the default and old functionality), as the mean of fish in a certain number of old ages (`methLinf="oldAge"`), or as the mean of a certain number of the longest fish (`methLinf="longFish"`). The number of ages or long fish is given in `num4Linf=`. Added methods for `type="Ogle"`. -# FSA 0.8.10 24-Sep-16 +# FSA 0.8.10 * `alkIndivAge()`: Modified. Added `na.rm=TRUE` to the checks on the minimum and maximum length data. * `catchCurve()`: Modified. 
Removed `type=` and blended that functionality into `parm=` for methods. Made `parm=` consistent across methods. * `chapmanRobson()`: Modified. Added `axis.age=` argument that allows the user to choose which type of x-axis is displayed (see examples; this addresses [#20](https://github.com/fishR-Core-Team/FSA/issues/20)) Also modified code that adds the axes so that they should "look better" in more instances. Added `na.rm=TRUE` to y-range calculation for the plot method. Added a `coef()` method. Added a `parm=` argument to the `confint()` and `summary()` methods. Added tests. @@ -337,19 +350,19 @@ * `Summarize()`: Modified. Added `nvalid=` and `percZero` to only print the nvalid and percZero result if they are "interesting" (i.e., different than n or zero, respectively) by default (may be manually over-ridden). Modified tests. * `vbStarts()`: Modified. Added `na.rm=TRUE` to checking of Linf values. -# FSA 0.8.9 23-Aug-16 +# FSA 0.8.9 * `ageComparison()`: Modified. Removed an internal call to `fact2num()` because of changes to `Summarize()` below. Should not impact user experience. * `diags()`: Added. * `gompertzFuns()`: Modified. Fixed some spacing around the message when `msg=TRUE`. * `logisticFuns()`: Modified. Fixed some spacing around the message when `msg=TRUE`. * `Summarize()`: Modified. Removed all uses where the main variable was a factor (this functionality was largely unneeded and unused, was inelegant and difficult to maintain). Removed pass-through to `summary`. Removed warnings about the RHS variables being converted to factors. Columns for "levels" of the RHS variables are now returned in their original model (i.e., if the variable was numeric in the original data.frame it is now numeric in the data.frame returned from this function) -- this should reduce need for using `fact2num()` when using the results of this function for variables that were originally numeric. Added more examples and tests for the numeric data. 
-# FSA 0.8.8 18-Jul-16 +# FSA 0.8.8 * `growthFunShow()`: Modified. Added Pauly et al. (1992) seasonal cessation function. Added `case=` for use with Schnute model. * `vbFuns()`: Modified. Added Pauly et al. (1992) seasonal cessation function. Slightly modified messages for "Typical" and "Original" parameterizations. * `vbStarts()`: Modified. Added `fixed=` so that the user can define some of the starting values. Added Pauly et al. (1992) seasonal cessation function. Added tests for `fixed=`. -# FSA 0.8.7 8-May-16 +# FSA 0.8.7 * Compiled under R v3.3.0. * Removed `relax` from `Suggests`. See `srStarts()` and `vbStarts()` notes below. This addresses [#17](https://github.com/fishR-Core-Team/FSA/issues/17). * Removed `gdata` from `Imports`. See `filterD()` and `Subset()` notes below. This addresses [#5](https://github.com/fishR-Core-Team/FSA/issues/5). @@ -378,7 +391,7 @@ * `vbModels()`: Removed. Replaced with `growthFunShow()`. * `vbStarts()`: Modified. Removed `dynamicPlot=TRUE` option. Moved it to `FSAsim` package. Added `param=` to match other `vbXXX()` (works as does `type=`). Modified plot when `plot=TRUE` by adding "STARTING VALUES" to title and moving starting values to within the plot. Added and `col.main=`. Made warnings and error tests more explicit. -# FSA 0.8.6 25-Mar-16 +# FSA 0.8.6 * Fixed problems with tests, and made the tests more explicit, related to PSD and Wr functions. Suppressed some warnings related to `sumTable()` in ALK related tests and `Summarize()` in age comparisons tests. Prompted by forthcoming changes to `testthat`. * Removed `News.md` from `.Rbuildignore` (apparently now supported by CRAN). * `alkPlot()`: Modified. Changed so that `xlim=` and `ylim=` would work when `type="area"` and `type="bar"`. This fixes [#10](https://github.com/fishR-Core-Team/FSA/issues/10) (Thanks to Joseph Feldhaus). @@ -393,8 +406,7 @@ * `psdVal()`: Modified. Minor changes to documentation. * `purl2()`: Modified. Added `delHeader=` argument and functionality. 
- -# FSA 0.8.5 14-Feb-16 +# FSA 0.8.5 * Added URL for fishR webpage in DESCRIPTION per CRAN request. Removed it from the URL field in DESCRIPTION. * Updated all references to Ogle (2016) in documentation. @@ -406,8 +418,7 @@ * `reproInfo()`: Modified. Made changes to `iGetAllDependencis()` based on forthcoming changes to `package.dependencies()` (as notified by CRAN). * `vbStarts()`: Modified. Fixed bug when `dynamicPlot=TRUE` was used. - -# FSA 0.8.4 21-Dec-15 +# FSA 0.8.4 * Now using Roxygen2 v5.0.1. * Removed some `requireNamespaces()` from some functions and moved those packages from `Suggests` to `Imports` so that those functions would work better with other packages. The only `requireNamespaces()` that remain are related to functions that require the `relax` package (so tcltk is not installed until needed) and `knitr`, `dunn.test`, and `lmtest` as these are unlikely to be used by other packages and will keep the packages that are loaded with `FSA` to a minimum. Packages moved from `Suggests` to `Depends` are `Hmisc` (for use in `binCI`), `gdata` (for use in `filterD()` and `Subset()`), `dplyr` (for use in `filterD()`), `sciplot` (for use in `fitPlot()`), `car` (for use in `residPlot()`), and `gplots` (for use with colors). * `addZeroCatch()`: Modified tests (to reduce warnings that were not part of tests). @@ -417,10 +428,11 @@ * `sumTable()`: Modified tests (but with `dimnames()`). -# FSA 0.8.3 23-Oct-15 +# FSA 0.8.3 * Removed vignetteBuilder from DESCRIPTION (remnant from a vignette I built and then removed) at request of CRAN. -# FSA 0.8.2 22-Oct-15 +# FSA 0.8.2 +* **Date:** 22-Oct-15 * Converted all files in `data-raw` to CSV files. * Removed all `\href{}{}` and `\url{}` codes to websites that I don't control. The addresses are now "naked" such that the user will need to copy-and-paste them into a browser to view the web page rather than clicking on a hyper link. Hopefully this will eliminate problems with R CMD CHECK. 
* `ChinookArg`: Updated help documentation. @@ -429,16 +441,14 @@ * `PikeNYPartial1`: Updated help documentation. * `SpotVA1`: Updated help documentation. -# FSA 0.8.1 10-Oct-15 -* **Submitted to CRAN.** +# FSA 0.8.1 * `col2rgbt()`: Added. * `compIntercepts()`: Added. * `compSlopes()`: Added. ---- -# FSA 0.8.0 8-Oct-15 -* **Submitted to CRAN** (*had an error in the tests for Sparc-Solaris*). +# FSA 0.8.0 8 * Added suggests for `dunn.test` for use in `dunnTest()` (see below). * `agePrecision()`: Modified. Changed `combn()` to `utils::combn()` and `sd()` to `utils::sd()` (within an `apply()`). * `catchCurve()`: Modified. Changed `na.exclude()` to `stats::na.exclude()`. @@ -480,42 +490,50 @@ * `test_PSD`: Modified. Altered tests that had used `==` to use `expect_equivalent()` which uses `all.equal()` with `check.attributes=FALSE`. -# FSA 0.7.11 Oct15 +# FSA 0.7.11 +* **Date:** Oct15 * Converted all `.txt` files to `.Rda` files. Original `.txt` files are in the `data-raw` directory which was added to `.Rbuildignore`. -# FSA 0.7.10 Oct15 +# FSA 0.7.10 +* **Date:** Oct15 * `purl2()`: Added `newname=` to allow the output file to have a name other than the same as the intput file. * `reproInfo()`: Added `markdown` to the `out=` types. -# FSA 0.7.9 Sep15 +# FSA 0.7.9 +* **Date:** Sep15 * Updated `README.md` and `DESCRIPTION` for new websites. * Changed all references to the WordPress site to the new website. Removed links to specific IFAR chapters. Changed my e-mail address. Created link in references to IFAR book page. * `fishR()`: Modified. Updated for the new websites. -# FSA 0.7.8 Sep15 +# FSA 0.7.8 +* **Date:** Sep15 * `ageComparison()`: Modified. Changed `what="McNemars"` and `what="Bowkers"` to `what="McNemar"` and `what="Bowker"`. Fixed bug if all ages are `NA`. * `catchCurve()`: Modified. Fixed bug related to `NA` values in the catch vector. * `chapmanRobson()`: Modified. Fixed bug related to `NA` values in the catch vector. * `validn()`: Modified. 
Fixed bug related to when a 1-dimensional numeric vector was not recognized as a vector. -# FSA 0.7.7 Aug15 +# FSA 0.7.7 +* **Date:** Aug15 * `ageBias()`: Modified. Changed default for `pch.mean=` to 95 (from 175). If `what=` has only one item, then results will now be invisibly returned so that results can be saved to an object. * `agePrecision()`: Modified. Added `trunc.diff=`. If `what=` has only one item, then results will now be invisibly returned so that results can be saved to an object. * `mapvalues()`: Modified. Corrected to export properly. * `removal()`: Modified. Minor edits to labels if `verbose=TRUE`. Added some more tests. * `vbStarts()`: Modified. Made `yngAge` the default for `meth0=`. Fixed bug that occurred when `meth0='yngAge'` and sample sizes at all ages were 1. -# FSA 0.7.6 Aug15 +# FSA 0.7.6 +* **Date:** Aug15 * `Summarize()`: Modified. Converted to using `iHndlFormula()`. Changed output for quantitative data (`validn` is always returned, `NAs` is never returned). Changed output for two-way factor data (not returned as a character from `formatC()`). Removed `...` from code in several places as it was buggy and not used. Added more checks and modified check messages. Fixed bug from when a 1-d matrix of characters was sent. Added tests. * `sumTable()`: Modified. Converted to using `iHndlFormula()`. Added tests. -# FSA 0.7.5 Aug15 +# FSA 0.7.5 +* **Date:** Aug15 * `addRadCap()`: Modified. Streamlined code. Changed default `in.pre=` to `NULL` (from `inc`). Added some tests for returned data. * `BluegillLM`: Removed. Moved to `FSAdata`. * `gConvert()`: Modified. Streamlined code. Changed `type=` to `out.type=`. Changed default `in.pre=` and `in.var=` to `NULL` (from missing). Changed code to handle changes in `in.pre=` and `in.var=`. Added some tests for returned data. * `gReshape()`: Removed. Moved to `FSAmisc`. -# FSA 0.7.4 Aug15 +# FSA 0.7.4 +* **Date:** Aug15 * `binCI()`: Modified.
Check for `Hmisc` with `requireNamespaces()` before processing body of function. This allowed moving `Hmisc` into `Suggests` declarations rather than `Imports`. * `chooseColors()`: Modified. Check for `gplots` with `requireNamespaces()` before processing body of function. This allowed moving `gplots` into `Suggests` declarations rather than `Imports`. * `filterD()`: Modified. Check for `dplyr` and `gdata` with `requireNamespaces()` before processing body of function. This allowed moving `dplyr` and `gdata` into `Suggests` declarations rather than `Imports`. @@ -529,11 +547,13 @@ * `vbStarts()`: Modified. Check for `relax` with `requireNamespaces()` before constructing the dynamic plot. This allowed moving `relax` into `Suggests` declarations rather than `Imports`. -# FSA 0.7.3 Aug15 +# FSA 0.7.3 +* **Date:** Aug15 * Removed all `importFrom()` directives and went to hard-wiring to packages with `::`. Added `imports()` directives for `stats`, `graphics`, `tools`, and `grDevices`. Removed `imports()` directive for `multcomp()`. * `vbStarts()`: Modified. Changed default method for `methEV=`. Changed order of starting values for `type="Mooij"` in order to match that from `vbFuns()`. This also fixed a bug when `dynamicPlot=TRUE` was used with `type="Mooij"`. Added tests to determine if parameter order is the same between `vbStarts()` and `vbFuns()` for all parameterizations. -# FSA 0.7.2 Jul15 +# FSA 0.7.2 +* **Date:** Jul15 * `ageBias()`: Modified. Corrected bug with labeling of x-axis on age-bias plot when `ref.lab=` and `nref.lab=` were not given by the user. Changed default for `nYpos=` from `1.1` to `1.03`. Added `cex.n=` to allow control of the size of the sample size labels. * `agePrecision()`: Modified. Changed `what="detail"` to `what="details"`. Note that `what="detail"` still works. * `dunnTest()`: Modified. Added a note to the help file about the use of complete cases. Suggested from Paule Bodson-Clermont. @@ -541,7 +561,8 @@ * `vbModels()`: Modified.
Changed order of `Original` and `Typical` (`Typical` is now shown first). Fixed an error in how the equation for the `Weisberg` parameterization was displayed. * `vbStarts()`: Modified. Added `cex.main=` as an argument and defaulted to `0.75`. Added `raw=TRUE` to `poly()` which is used when `meth0="poly"`. Added `Original` and `Typical` to the `type=` options. This allows both a capitalized and uncapitalized version for these two parameterizations. -# FSA 0.7.1 Jul15 +# FSA 0.7.1 +* **Date:** Jul15 * `ageBias()`: Modified. Moved into a single file with `agePrecision()`. Cleaned-up help file. No change in behavior. * `agePrecision()`: Modified. Moved into a single file with `ageBias()`. Cleaned-up help file. No change in behavior. * `alkAgeDist()`: Modified. Moved into a single file with `alkMeanVar()`. Cleaned-up help file. Added some error/warning tests. No change in behavior. @@ -569,7 +590,8 @@ * `vbFuns()`: Moved into `growthModels` file. Did not change behavior. * `vbModels()`: Modified. Changed `type=` to `family=` to avoid confusion in the help file with `type=` in `vbFuns()`. Moved into `growthModels` file. -# FSA 0.7.0 Jul15 +# FSA 0.7.0 +* **Date:** Jul15 * Fixed description to be in title case. * Fixed reference to fishR page in description file. * Fixed several URL references, deleted others that have changed and are no longer available. @@ -577,7 +599,8 @@ * `hist.formula()`: Modified. Rebuilt to use `iHndlFormula()`. Modified how `xlab=` is used (result is the same). Added some tests. * `ksTest()`: Modified. Rebuilt to use `iHndlFormula()`. Added some tests for messages and to make sure results matched `ks.test()`. -# FSA 0.6.25 Jul15 +# FSA 0.6.25 +* **Date:** Jul15 * `alkPrep()`: Deleted. Moved to `FSAmisc` package (on GitHub). * `changesPos()`: Deleted. Moved to `FSAmisc` package (on GitHub). * `chapmanPlot()`: Deleted. Moved to `FSAmisc` package (on GitHub). @@ -594,17 +617,20 @@ * `TroutDietSL`: Deleted. Moved to `FSAmisc` package (on GitHub).
* `walfordPlot()`: Deleted. Moved to `FSAmisc` package (on GitHub). -# FSA 0.6.24 Jun15 +# FSA 0.6.24 +* **Date:** Jun15 * `alkIndivAge()`: Modified. Switched to using `iHndlFormula()` at the beginning. Added more checks and tests. * `expandLenFreq()`: Modified. Added more checks. Added some tests. * `wsVal()`: Modified. Added more tests. -# FSA 0.6.23 Jun15 +# FSA 0.6.23 +* **Date:** Jun15 * `ageBias()`: Modified. Fixed bugs related to axes on numbers plot and sunflower plot. * `filterD()`: Modified. Added `reorder=FALSE` to `drop.levels()` so that the order of levels is not changed when levels are dropped. * `residPlot.nlme()`: Added. -# FSA 0.6.22 Jun15 +# FSA 0.6.22 +* **Date:** Jun15 * `extraSS()`: Modified. Added `sim.name=` to allow for a common typing mistake. * `logbtcf()`: Modified. Slight change to handle a check of `lm` class. * `lrt()`: Modified. Added `sim.name=` to allow for a common typing mistake. @@ -612,13 +638,15 @@ * `rcumsum()`: Modified. Modified to handle `table`, `matrix`, and `data.frame` classes as long as they are 1-dimensional. * `srStarts()`: Modified. Corrected some bugs related to checks. Added more tests. -# FSA 0.6.21 Jun15 +# FSA 0.6.21 +* **Date:** Jun15 * `addRadCap()`: Modified. Modified so that `in.pre=` string must be at the start of the variable names. Added a check for when the `in.pre=` string does not exist at the start of any variable names. Added a check for whether all `in.var=` variables exist. Added some simple tests (need more). * `bcFuns()`: Modified. Removed `type=`; `BCM=` can now be either numeric or a string. Allowed string to be in any case (will be converted to the required all upper-case). Corrected some errors for when `msg=TRUE`. Added some simple tests. * `gConvert()`: Modified. Modified so that `in.pre=` string must be at the start of the variable names. Added a check for when the `in.pre=` string does not exist at the start of any variable names. Added some simple tests (need more). 
* `gReshape()`: Modified. Modified so that `in.pre=` string must be at the start of the variable names. Added a check for when the `in.pre=` string does not exist at the start of any variable names. Added some simple tests (need more). -# FSA 0.6.20 Jun15 +# FSA 0.6.20 +* **Date:** Jun15 * `gompFuns()`: Deleted. * `gompModels()`: Deleted. * `GompertzFuns()`: Added. Replaced `gompFuns()`. Added `type="Troynikov1"` and `type="Troynikov2"`. @@ -633,11 +661,13 @@ * `vbFuns()`: Modified. Added `type="Polacheck"` which is equivalent to `type="Laslett"`. Added a new reference in the help file. * `vbModels()`: Modified. Added `cex=` and `type=`. -# FSA 0.6.19 Jun15 +# FSA 0.6.19 +* **Date:** Jun15 * `RichardsFuns()`: Added. * `RichardsModels()`: Added. -# FSA 0.6.18 Jun15 +# FSA 0.6.18 +* **Date:** Jun15 * Changed nearly all "messages" using `cat()` to using `message()` so that they can be suppressed with `suppressMessage()` or `message=FALSE` in knitr. See "One comment on messages" at http://yihui.name/knitr/demo/output/. Specific functions modified are listed below. * `ageBias()`: Modified. Changed all `cat()` to `message()`s. Changed so that messages (result headers) are only printed if `what=` contains more than one item. * `agePrecision()`: Modified. Changed all `cat()` to `message()`s. Changed so that messages (result headers) are only printed if `what=` contains more than one item. @@ -655,26 +685,30 @@ * `removal()`: Modified. Changed all `cat()` to `message()`s. * `srFuns()`: Modified. Changed all `cat()` to `message()`s. Created some tests. -# FSA 0.6.17 Jun15 +# FSA 0.6.17 +* **Date:** Jun15 * `extraSS()`: Modified. Added more message tests and some calculational tests (compared to `anova()` results). * `gompFuns()`: Modified. Changed all `cat()`s to `message()`s and slightly modified the messages. Fixed minor bugs in some created functions. Created some tests. * `logisticFuns()`: Modified. Changed all `cat()`s to `message()`s and slightly modified the messages.
Fixed minor bugs in some created functions. Created some tests. * `lrt()`: Modified. Added more message tests and some calculational tests (compared to `lrtest()` from `lmtest` package results). * `vbFuns()`: Modified. Changed all `cat()`s to `message()`s and slightly modified the messages. Fixed minor bugs in some created functions. Created some tests. -# FSA 0.6.16 Jun15 +# FSA 0.6.16 +* **Date:** Jun15 * `extraSS()`: Modified. Added `sim_names=` and `com_name=` so that simple descriptive names could be given to the model and printed in the heading of the output. Added checks for whether the complex model appears more complex or not. Added tests for warning and error messages. * `fishR()`: Modified. Fixed bug with `where="news"`. Added tests. * `fitPlot()`: Modified. Added ability to modify y-axis limits for the nonlinear regression model. Thanks to Gabriela N. for asking for this. * `hoCoef()`: Modified. Changed `lmobj=` to `object=`, added degrees-of-freedom to the output matrix, streamlined the code, added some checks, and added some tests. * `lrt()`: Modified. Added `sim_names=` and `com_name=` so that simple descriptive names could be given to the model and printed in the heading of the output. Added checks for whether the complex model appears more complex or not. Added tests for warning and error messages. -# FSA 0.6.15 Jun15 +# FSA 0.6.15 +* **Date:** Jun15 * `addZeroCatch()`: Modified. Deleted extraneous `print()` statement. * `lencat()`: Modified. Major re-write to make it easier to trouble-shoot. Fixed bug related to empty category on end when `as.fact=TRUE` and `use.names=TRUE`. Added more tests. * `psdCalc()`: Modified. Removed extra open-ended category (e.g., PSD-T-) for PSD intervals. -# FSA 0.6.14 May15 +# FSA 0.6.14 +* **Date:** May15 * Added travis-ci integration. * Added coveralls integration. * Added `importFrom` for `mapvalues()` from `plyr`. @@ -696,12 +730,14 @@ * `Subset()`: Modified. Added some checks with error messages.
Added suite of tests. * `validn()`: Modified. Added some checks with error messages. Added suite of tests. -# FSA 0.6.13 May15 +# FSA 0.6.13 +* **Date:** May15 * Some miscellaneous reorganizations of files. * `ageBias()`: Modified. Corrected bugs with `show.pts=TRUE` and "sunflower plot" that came from changes made in version 0.5.1. * `residPlot()`: Modified. Deleted `student=`. Added `resid.type=` which allows used of standardized (internally studentized) and (externally) studentized residuals for linear models (along with raw residuals). Added code following `nlsResiduals()` from `nlstools` for standardized residuals for nonlinear models. -# FSA 0.6.12 May15 +# FSA 0.6.12 +* **Date:** May15 * `gompFuns()`: Added. * `gompModels()`: Added. * `logisticFuns()`: Added. @@ -709,7 +745,8 @@ * `reproInfo()`: Modified. Added the `out=` argument to allow the output to be straight R or LaTeX. Removed the `listFiles=` argument. Changed the output to be more succinct. Streamlined the code. * `vbFuns()`: Modified. Fixed a bug with the Laslett model. -# FSA 0.6.11 Apr15 +# FSA 0.6.11 +* **Date:** Apr15 * `kCounts()`: Added. Was `swvCounts()`. * `kPvalue()`: Added. Was `swvPvalue()`. * `purl2()`: Added. Was `swvCode()`. Added `timestamp=` argument for adding a timestamp to the created script. @@ -723,45 +760,53 @@ * `swvPvalue()`: Deleted. Changed to `kPvalue()`. * `swvREG()`: Deleted. Moved to `NCStats`. -# FSA 0.6.10 Apr15 +# FSA 0.6.10 +* **Date:** Apr15 * Compiling under R 3.2.0. * Added some cross-reference links to help files. * Remove fishR vignette section and added IFAR Chapter section to help files. * `fishR()`: Modified. Added `IFAR` as an option. Updated code to be more simple. -# FSA 0.6.5 Apr15 +# FSA 0.6.5 +* **Date:** Apr15 * Last version for submission of first draft of Introductory Fisheries Analyses with R. * `capHistConvert()`: Modified. 
Added a warning section and an example of problems that can occur if the data are in event format but the event variable contains unused levels as may occur following subsetting. Thanks to Joseph Feldhaus for pointing out this problem. * `extraSS()`: Modified. Changed algorithm to determine if the models were of the same class or not. The modification allows a model to have multiple classes. * `iHndlCols2Use` (Internal Function): Modified. Fixed bug with how the columns were selected. Added a suite of tests for this function. This will fix bugs in `capHistConvert()` and `capHistSum()`. Thanks to Joseph Feldhaus for pointing out this egregious error. * `lrt()`: Modified. Changed algorithm to determine if the models were of the same class or not. The modification allows a model to have multiple classes. -# FSA 0.6.4 Apr15 +# FSA 0.6.4 +* **Date:** Apr15 * Changed to using `LazyData: true`. * `se()`: Added. Removed `importFrom` of `se()` from `sciplot`. -# FSA 0.6.3 Apr15 +# FSA 0.6.3 +* **Date:** Apr15 * Some modifications to tests. * `plot.capHist()`: Modified. Changed default plot look which can now be controlled with `pch=`, `cex.pch=`, and `lwd=`. Modified the two y-axis scales to use `plotmath` characters. -# FSA 0.6.2 Mar15 +# FSA 0.6.2 +* **Date:** Mar15 * `capHistConvert()`: Modified. Streamlined code around creating `var.lbls`. Made `event` the default value for `var.lbls.pre=`. Added some checks to `var.lbls.pre=` if it starts with a number or has too many values. Added `cols2use=` and modified use of `cols2ignore=` via `iHndlCols2use()`. * `capHistSum()`: Modified. * `iHndlCol2use()`: Added. Added this internal function to handle `cols2use=` and `cols2ignore=` in `capHistConvert()` and `capHistSum()`. -# FSA 0.6.1 Mar15 +# FSA 0.6.1 +* **Date:** Mar15 * `catchCurve()`: Modified. Changed how `ages2use=` was handled so that negative values can be used to exclude some ages.
Will also now send an error if a mix of positive and negative ages are sent in `ages2use=`. Better handled the situation where `ages2use=` had more ages than the `age` variable. Checked for non-positive weights if `weighted=TRUE` and returned a warning and changed the non-positive weights to the minimum of the positive weights. * `chapmanRobson()`: Modified. Changed how `ages2use=` was handled so that negative values can be used to exclude some ages. Will also now send an error if a mix of positive and negative ages are sent in `ages2use=`. Better handled the situation where `ages2use=` had more ages than the `age` variable. * `expandCounts()`: Modified. Changed so that ``message()''s are printed at the end instead of along the way. This reduces confusion of what appear to be messages of success followed by an error. Thanks to Dan Oele for bringing this confusion to my attention. * `plotBinResp()`: Modified. Changed the way the breaks were calculated (uses `lencat()` now). -# FSA 0.6.0 Mar15 +# FSA 0.6.0 +* **Date:** Mar15 * updated DESCRIPTION file (following this -- http://r-pkgs.had.co.nz/description.html) * `srFuns()`: Modified. Changed function returned when `simplify=FALSE` so that if the parameters are named that the name is dropped. Thus, when the function is used, the returned result will not be annoyingly named as the first parameter. Added functionality for the "density-independence" model. * `srStarts()`: Modified. Added functionality for the "density-independence" model. * `vbFuns()`: Modified. Changed function returned when `simplify=FALSE` so that if the parameters are named that the name is dropped. Thus, when the function is used, the returned result will not be annoyingly named as the first parameter. -# FSA 0.5.3 Mar15 +# FSA 0.5.3 +* **Date:** Mar15 * `growthModelSim()`: Deleted. The simulation functionality was moved to the `FSAsim` package.
The functionality related to finding starting values for the von Bertalanffy models was moved to `vbStarts()`. * `srFuns()`: Modified. A complete rebuild to make similar to `vbFuns()`. Added `simple=`. Added `type='Shepherd'` for the Shepherd (1982) three parameter model and `type='SailaLorda'` for the "Saila-Lorda" three parameter model from Iles (1994). Added tests for error messages. * `srModels()`: Modified. A complete rebuild to make similar to `growthModels()`. Added "Shepherd" and "Saila-Lorda" models. @@ -769,41 +814,51 @@ * `srStarts()`: Modified. A complete rebuild to streamline. Removed default method (i.e., a formula must be used now). Added "Shepherd" and "Saila-Lorda" models. Modified plotting routine, including adding `col.mdl=`, `lwd.mdl=`, and `lty.mdl=`. Moved the dynamic modeling aspects of `srSim()` into this function and is called with the new argument `dynamicPlot=TRUE`. Also added `minmax.ratio=` and `delta.prop=` for use with the dynamic plots. * `vbStarts()`: Modified. A complete rebuild to streamline and fix some bugs that had not been found. Modified plotting routine, including adding `col.mdl=`, `lwd.mdl=`, and `lty.mdl=`. Also added all of the von Bertalanffy parameterizations in `growthModelSim()` into this function and is called with the new argument `dynamicPlot=TRUE`. Added dynamics plots for the "Francis" and "Schnute" parameterizations. -# FSA 0.5.2 Mar15 +# FSA 0.5.2 +* **Date:** Mar15 * `psdPlot()`: Modified. Fixed bug related to `NA`s in `max.brks` variable. -# FSA 0.5.1 Mar15 +# FSA 0.5.1 +* **Date:** Mar15 * `ageBias()`: Modified. Reversed the order of the formula ... it is now `nrefvar~refvar`. This more closely matches other R functions where the tilde may be interpreted as the word "by". In other words, the formula now reads as "nonreference variable by reference variable" (i.e., Y by X). Thanks to Richard McBride for the suggestion. Modified the age-bias plot extensively ...
added `sfrac=` and defaulted to 0 to remove ends of the confidence intervals, added `cex.mean=` to control the size of the symbol for the mean point, added `lwd=` that will control and set all of the `lwd` defaults to 1. * `agePrecision()`: Modified. Changed all "CV" results to "ACV". -# FSA 0.4.51 Mar15 +# FSA 0.4.51 +* **Date:** Mar15 * `catchCurve()`: Modified. Updated the help file regarding `zmethod="Smithetal"`. -# FSA 0.4.50 Mar15 +# FSA 0.4.50 +* **Date:** Mar15 * `capFirst()`: Modified. Added a check to make sure the inputted object was either a character or factor class. Added code to return the object as the same class of the original object. * `lencat()`: Modified. Added a catch for bad choices of arguments. Added a catch to send a warning if the vector contains all `NA` values (this happens when `lencat()` is used within a loop or as part of `psdAdd()`). Added tests for error and warning messages. Changed how the formula was handled in the formula method. * `psdAdd()`: Modified. Fixed a bug with names when using labels. Added `verbose=`. Added catches and sent messages if `verbose=TRUE` for when no Gabelhouse lengths are known for a species and if the lengths for a species are all missing (see note for `lencat()` above). * `PSDlit`: Modified. Fixed the trophy length for White Bass (from 15 to 18). This solved a bug related to non-unique breaks. -# FSA 0.4.49 Mar15 +# FSA 0.4.49 +* **Date:** Mar15 * `expandCounts()`: Modified. Made message regarding rows with zero counts more useful. Added missing counts to the catch of zero counts. Made changes to handle more "odd" data entries (see "details" in the help file). Made some tests. Added some tests. -# FSA 0.4.48 Mar15 +# FSA 0.4.48 +* **Date:** Mar15 * `psdCalc()`: Modified. Corrected "bug" with `units=`.
Also modified warning message when no "stock" fish were present in the data.frame to further note what `units=` were used (i.e., this problem is likely to happen if the data is inches but the user uses the default `units='mm'`). Thanks to S. Mather for inspiring this fix. -# FSA 0.4.47 Feb15 +# FSA 0.4.47 +* **Date:** Feb15 * `dunnTest()`: Modified. Corrected "bug" in the order that the groups are subtracted (i.e., they were flipped). -# FSA 0.4.46 Feb15 +# FSA 0.4.46 +* **Date:** Feb15 * `catchCurve()`: Modified. Changed default for `pos.est=` to `topright`. Added `cex.pos=` (and set default to slightly smaller value). * `chapmanRobson()`: Modified. Changed default for `pos.est=` to `topright`. Added `cex.pos=` (and set default to slightly smaller value). -# FSA 0.4.45 Feb15 +# FSA 0.4.45 +* **Date:** Feb15 * `hist.formula()`: Modified. Changed use of `par()` to eliminate modifications to the gridding of plots after the function is complete. Also removed the setting of `mar=` and `mgp=` in `par()`. * `mrOpen()`: Modified. Removed pretty printing for `summary()` and `confint()` methods. These got in the way of being able to `cbind()` the results together for a succinct display. * `residPlot()`: Modified. Changed use of `par()` to eliminate modifications to the gridding of plots after the function is complete. -# FSA 0.4.44 Feb15 +# FSA 0.4.44 +* **Date:** Feb15 * `.onAttach()`: Modified. Centered the message and adjusted for different lengths of version numbers. * `alkPlot()`: Modified. Fixed bug when using `add=TRUE` with `type="bubble"`. * `capHistSum()`: Modified. Changed to return `par()` options to what they were before the function was called. @@ -815,37 +870,45 @@ * `lwCompPreds()`: Modified. Changed `quant.lens=` to `qlens=`. Changed default `qlens=` to have the 5th and 95th percentiles rather than the minimum and maximum values. Added `qpens.dec=` so that the user could control the number of decimals for the lengths derived from `qlens=`.
* `srSim()`: Modified. Changed to return `par()` options to what they were before the function was called. -# FSA 0.4.43 Feb15 +# FSA 0.4.43 +* **Date:** Feb15 * `mrOpen()`: Modified. Changed `summary()` and `confint()` methods to allow single, multiple, or all choices of parameters to return results for. Also added code to print the results more prettily. * `swvCode()`: Modified. Fixed bug related to `blanks='extra'`. -# FSA 0.4.42 Feb15 +# FSA 0.4.42 +* **Date:** Feb15 * `filterD()`: Added. -# FSA 0.4.41 Jan15 +# FSA 0.4.41 +* **Date:** Jan15 * `catchCurve()`: Modified. Removed the use of larger points in the `plot()`. * `chapmanRobson()`: Modified. Removed the use of larger points in the `plot()`. * `metaM()`: Modified. Deleted `group=` (and created `method="ZhangMegreyD"` and `method="ZhangMegreyP"`). Added geometric mean regression methods for Hoenig. Changed default for `justM=` to `TRUE`. Fixed several minor bugs from the original implementation. Added some checks for reasonableness of some arguments. Created tests for several methods to see if the results matched those from Kenchington (2014). Added code to compute with several methods at once. * `Mmethods()`: Added. Added as a function and removed as a vector. -# FSA 0.4.40 Jan15 +# FSA 0.4.40 +* **Date:** Jan15 * `lencat()`: Modified. Fixed a bug that occurred if `breaks=` were given but the vector contained `NA`s. Thanks to Ben Neely for pointing this out. -# FSA 0.4.39 Jan15 +# FSA 0.4.39 +* **Date:** Jan15 * `catchCurve()`: Modified. Changed `use.weights=` to `weighted=`. Added some checks for the formula in the formula version and for the variables in the default version. Add unit tests for warnings and errors and two tests for values. * `chapmanRobson()`: Modified. Added the `method="Smithetal"` methodology for estimating the SE of Z (and made it the default). Added some checks for the formula in the formula version and for the variables in the default version. Added `verbose=` to `summary()`.
Add unit tests for warnings and errors and two tests for values. -# FSA 0.4.38 Jan15 +# FSA 0.4.38 +* **Date:** Jan15 * `alkPlot()`: Modified. Changed behavior for adding a legend to alleviate a bug. * `metaM()`: Added. -# FSA 0.4.37 Jan15 +# FSA 0.4.37 +* **Date:** Jan15 * `confint.nlsBoot()`: Modified. Changed default for `err.col=` to `black` from `red`. Fixed example due to changes in `nlsBoot` package. * `extraSS()`: Modified. Added a catch to make sure all models are of the same type. Added a catch to note that the function does not work with other than `lm()` or `nls()` models. Fixed a bug related to the labels for results from `anova()` being different depending on whether `lm()` or `nls()` models were given. Added some examples. * `hist.formula()`: Modified. Fixed bug (originated in last version) that nothing was returned when only one histogram was constructed. * `lrt()`: Modified. Changed call to `lrtest()` to a call to `lrtest.default()`. Added a catch to make sure all models are of the same type. Note that degrees-of-freedom from `lrtest()` are not error df; thus, modified to report error df to match `extraSS()`. Added some examples. -# FSA 0.4.36 Jan15 +# FSA 0.4.36 +* **Date:** Jan15 * `hist.formula()`: Modified. Added `iaxs=`, which when set to the default value of `TRUE` will use `xaxs="i"` and `yaxs="i"` to remove the "floating" x-axis produced by `hist()` in base R. * `lwCompPreds()`: Modified. Added the `yaxs=` argument. * `psdCalc()`: Modified. Added `showIntermediate=` to allow showing intermediate values in the calculation of the PSD indices. Added `justAdds=` to allow the user to return just those results that pertain to the values in `addLens=`. Added ability to use a named vector in `addLens=` and then not use `addNames=`. Changed `digits=1` to `digits=0`. Thanks to Ben Neely for the suggestions. @@ -853,13 +916,16 @@ * `residPlot()`: Modified.
Added `xpd=TRUE` to the loess line routine so that the curve and polygon would stay within the plotting region. * `tictactoe()`: Modified. Add the ability to handle differences between when `xaxs="r"` and `yaxs="r"` are used and when `xaxs="i"` and `yaxs="i"` are used. -# FSA 0.4.35 Jan15 +# FSA 0.4.35 +* **Date:** Jan15 * `dunnTest()`: Added. -# FSA 0.4.34 Dec14 +# FSA 0.4.34 +* **Date:** Dec14 * `addZeroCatch()`: Modified. Removed `idvar=`, forced the `eventvar=` and `speciesvar=` variables in the returned data.frame to be numeric if they were numeric in the original data.frame, allowed `speciesvar=` to have more than one variable, and added `na.rm=`. Multiple values for `specvar=` will allow the user to add zeros based on a combination of variables (e.g., species and size category). The `na.rm=` argument allows the user to remove "missing" species, which are common if some sampling events did not capture any fish. -# FSA 0.4.33 Dec14 +# FSA 0.4.33 +* **Date:** Dec14 * `growthModelSim()`: Modified. Changed all "K0" objects to "t50". * `headtail()`: Added. * `logbtcf()`: Added. @@ -867,14 +933,16 @@ * `vbFuns()`: Modified. Changed all "K0" objects to "t50". * `vbStarts()`: Modified. Changed all "K0" objects to "t50". -# FSA 0.4.32 Nov14 +# FSA 0.4.32 +* **Date:** Nov14 * `expandCounts()`: Added (from `fishWiDNR` package). * `expandLenFreq()`: Added. Same as `lenFreqExpand()` but thought that this name fits better with `expandCounts()`. * `pcumsum()`: Added. * `rcumsum()`: Modified. Completely new code (much simpler). * `validn()`: Added. -# FSA 0.4.31 Nov14 +# FSA 0.4.31 +* **Date:** Nov14 * Removed the suggests for `plyr`. * `addZeroCatch()`: Modified. Slight modifications to help file. Fixed bug related to error checking the number of variables. Added some tests. * `lencat()` Modified.
Added `droplevels=` and kept `drop.levels=` as I could not consistently remember what the name of the argument was -- i.e., the user can use either one, but `droplevels=` is preferred. @@ -885,14 +953,16 @@ * `swvCode()`: Modified. Removed ability to Stangle the code and thus removed `method=`. Modified code to allow usage of .Rmd files in addition to .Rnw files. -# FSA 0.4.30 Oct14 +# FSA 0.4.30 +* **Date:** Oct14 * Added a suggests for `plyr`, for examples using `mapvalues()`. * `lencat()`: Modified. Changed `as.fact=` to default to same as `use.names=`. This will result in the same behavior as before. However, it also allows the user to set `use.names=TRUE` and `as.fact=FALSE` to return a character vector (that is not a factor). * `psdAdd()`: Modified. Added `addSpec=` and `addLens=` so that the user can have non-Gabelhouse lengths for individual species. * `PSDlit`: Modified. Changed "Walleye x Sauger" to "Saugeye" and "White Bass x Striped Bass" to "Palmetto Bass". Updated the Palmetto Bass values based on Dumont and Neely (2011), but kept old values as "Palmetto Bass (original)". Deleted redundant entries for some species. * `recodeF()`: Deleted. Functionality is in `mapvalues()` from `plyr`. Easy come, easy go (i.e., added in last version). -# FSA 0.4.29 Oct14 +# FSA 0.4.29 +* **Date:** Oct14 * Added a suggests for `dplyr`. * Added an external file in inst/extdata for testing PSD and Wr calculations. * `capFirst()`: Modified. Changed `words=` to `which=`. @@ -902,14 +972,16 @@ * `wrAdd()`: Modified. Added a default and a formula version to allow efficiency with `dplyr`. Added examples. Updated tests. * `WSlit`: Modified. Added results for Sardine. -# FSA 0.4.28 Sep14 +# FSA 0.4.28 +* **Date:** Sep14 * `psdAdd()`: Added. * `psdDataPrep()`: Deleted. Functionality replaced by `psdAdd()`. * `recodeSpecies()`: Modified. Completely re-written but with the same basic functionality.
This new version returns a vector that can then be appended to an existing data.frame rather than the old function that returned a whole data.frame. This function should allow ease of use with `mutate()` from `dplyr`. Added more catches for bad `formula=`s. Added some tests. * `wrAdd()`: Modified. Completely re-written with completely new functionality. This new version returns a vector that can then be appended to an existing data.frame rather than the old function that returned a whole data.frame. This function should allow ease of use with `mutate()` from `dplyr`. Added more catches for bad `formula=`s. Added some tests. * `wrDataPrep()`: Deleted. Functionality replaced by new `wrAdd()`. -# FSA 0.4.27 Sep14 +# FSA 0.4.27 +* **Date:** Sep14 * `hist.formula()`: Modified. Slight modifications to warning messages. * `Summarize()`: Modified. Slight modification to warning messages. * `tictactoe()`: Modified. Changed `predbal=` to `predobj=`, `preybal=` to `predbal=`, `xlab=` to `predlab=`, `ylab=` to `preylab=`, `bal.col=` to `obj.col=`, and `bal.trans=` to `obj.trans=`. @@ -918,7 +990,8 @@ * `wsLit`: Modified. Changed order of variables, changed hybrid species names to match that of Neumann et al. (2012), updated comments to relate to Neumann et al. (2012) rather than Blackwell et al. (2012), and added information for the Riffle Dace. * `wsVal()`: Modified. Changed the names of the `min.len` and `max.len` variables to be either `min.TL` and `max.TL` or `min.FL` and `max.TL` as appropriate. Suppressed the return of `max.len` and `quad` if they did not exist and suppressed return of `comment` if it was `none`. Added a catch if more than one species was given in `species=`. Created some tests. -# FSA 0.4.26 Sep14 +# FSA 0.4.26 +* **Date:** Sep14 * `capFirst()` Modified. Added an option to handle a vector of strings rather than just a single string. * `lencat()`: Modified. Fixed bug with category names when `use.names=TRUE`.
Moved all internal functions outside of `lencat()` environment (and renamed them). Cleaned up code. * `psdCI()`: Modified. Added more catches for calls with mistakes. Created some internal functions to modularize the computations. Added tests. @@ -930,7 +1003,8 @@ * `tictactoe()`: Modified. * `tictactoeAdd()`: Removed. Directed user to use `plotCI()` from `plotrix` instead. -# FSA 0.4.25 Sep14 +# FSA 0.4.25 +* **Date:** Sep14 * `mrClosed()`: Modified. Better handled a given value of `R=`. * `psdCalc()`: Modified. Fixed a bug that appeared when no "zero" fish were present in the data. Moved all internal functions outside of `psdCalc()` environment (and renamed them). * `psdCI()`: Added. @@ -938,7 +1012,8 @@ * `swvCode()`: Modified. Fixed bug when attempting to use this function from outside of the directory where the .Rnw file exists. Added functionality to add a "note" to the first line(s) of the output file. Added code to remove the first line of the output file if it was going to be blank. * `swvFinish()`: Modified. Updated code because `iGetFilePrefix()` was deleted. -# FSA 0.4.24 Aug14 +# FSA 0.4.24 +* **Date:** Aug14 * `ageKey()`: Deprecated. See `alkIndAge()`. * `ageKeyPlot()`: Deprecated. See `alkPlot()`. * `ageKeyPrep()`: Deprecated. See `alkPrep()`. @@ -952,25 +1027,30 @@ * `Summarize()`: Modified. Moved all internal functions outside of `Summarize()` environment (and renamed them). 
Removed the check for character data. Kept the check for whether catch was a vector or not but if catch is a one row or one column matrix or data.frame then it will be converted to a vector to continue. The latter change allows one to extract one row from a data.frame to send to `removal()` without having to use `as.numeric()`. Modified and added examples of the use of `apply()` and `lapply()`. -# FSA 0.4.22 Aug14 +# FSA 0.4.22 +* **Date:** Aug14 * `ageKey()`: Modified. Changed to using `all.equal()` to check if the ALK has rows that don't sum to 1. This was an attempt to minimize the number of "false negatives" caused by [R FAQ 7.31](https://cran.r-project.org/doc/FAQ/R-FAQ.html#Why-doesn_0027t-R-think-these-numbers-are-equal_003f). Changed the check of whether the longest fish in the length sample is greater than the longest length bin in the ALK to whether the longest fish in the length sample is greater than the longest length bin in ALK PLUS the minimum width of length categories. This last change is an attempt to minimize the number of warnings that occur when the longest fish in the length sample would be in the last length category ALK but because the length categories are labelled by their minimum length it looks like it is not. The minimum width is used to still allow uneven length categories and, thus, this check may still produce some "false negatives." * `ageKeyPlot()`: Modified. Removed `bubble.ylab=`. Modified `ylab=` to handle what `bubble.ylab=` used to handle. * `removal()`: Modified. Added options to perform Moran (1951) and Schnute (1983) removal methods. Added examples of the new functionality. Updated the tests for the new functionality. -# FSA 0.4.21 Jul14 +# FSA 0.4.21 +* **Date:** Jul14 * `depletion()`: Modified. Changed `type=` to `method=` and added `DeLury` as an option to `method=` (and left `Delury`). Changed `ricker.mod=` to `Ricker.mod=`. Added some checking for bad arguments. 
Created internal functions specific to the Leslie and DeLury methods (for isolation). Modified some clunky code. Added references to specific sections in Seber (2002) for SE equations. Updated examples. Added tests and error checking. * `coef.depletion()`: Modified. Added `digits=`. * `confint.depletion()`: Modified. Added `digits=`. Modified the `parm=` list to be more efficient. * `plot.depletion()`: Modified. Removed internal `par()` settings. * `summary.depletion()`: Modified. Added `verbose=` and `digits=`. -# FSA 0.4.20 Jul14 +# FSA 0.4.20 +* **Date:** Jul14 * `removal()`: Modified. Made `"CarleStrub"` the default method. Changed `type=` to `method=`. Changed internal `meth` object to `lbl`. Moved all internal functions outside of `mrOpen()` environment and added other internal functions to isolate all intermediate calculations. Added a `verbose=` and `parm=` to `summary()`. Streamlined clunky code in `confint()` including removing the `all` and `both` options from `parm=`. Added more checks for bad inputs, notes in the code as to sources for the formulae, and tests. -# FSA 0.4.19 Jul14 +# FSA 0.4.19 +* **Date:** Jul14 * Modified some tests to check whether the suggested package was installed. * `capHistSum()`: Modified. Changed column and row labels for `$methodB.top` and column labels for `$methodB.bot`. Added an m-array object for when more than two sampling events are present. Added calculations for the number of fish first seen on event i (ui), the number of fish last seen on event i (vi), and the number of fish seen i times (fi) to `$sum`. * `jolly()`: Added. Same as `mrOpen()`, added only for convenience. @@ -979,7 +1059,8 @@ * `plot.CapHistSum()`: Added. * `plot.mrClosed()`: Modified. Changed axis labels as the expressions did not print with some fonts and devices. -# FSA 0.4.18 Jul14 +# FSA 0.4.18 +* **Date:** Jul14 * Moved to compiling under R 3.1.1. * Added a Suggests for `marked` for the example in `capHistConvert()`. * `ageBias()`: Modified. 
Changed default value of `min.n.CI=` from 5 to 3. Added an `na.rm=TRUE` to the `min()` and `max()` that produced the age ranges for the age agreement table. @@ -992,7 +1073,8 @@ * `plot.AgeBias()`. Modified. Fixed bug that produced a warning if all of the bias t-tests were either significant or not significant. Changed `col.err=` to `col.CI=`, `lwd.err=` to `lwd.CI=`, `col.err.sig=` to `col.CIsig=`, `col.ref=` to `col.agree=`, `lwd.ref=` to `lwd.agree=`, `lty.ref=` to `lty.agree=`, `show.rng=` to `show.range=`, `col.rng=` to `col.range=`, `lwd.rng=` to `lwd.range=`. Removed `col.lab=` and `row.lab=` which were deprecated several minor versions ago. Changed default values for `lwd.rng=` and `lwd.CI=` from 2 to 1. Added a `cex.numbers=` argument for controlling the size of the numbers in the "numbers plot" (defaults to 0.9). * `plotBinResp()`: Modified. Changed `trans.pt=` to `transparency=`. -# FSA 0.4.17 Jul14 +# FSA 0.4.17 +* **Date:** Jul14 * `confint.mrClosed()`: Modified. Moved all internal functions outside of `confint.mrClosed()` environment (see `iCI.MRCMultiple()` and `iCI.MRCSingle()`). Changed `ci.type=` to just `type=`. Streamlined binomial method for single census. Used `iMRCSingleSE()` to get SE for when `type="normal"` for Chapman, Bailey, and Ricker methods. * `extraSS()`: Modified. Slight change to row labels in output table. * `iMRCMultiple()`: Added. Was `mrc2()` internal function inside of `mrClosed()` environment. @@ -1003,7 +1085,8 @@ * `plot.mrClosed()`: Modified. Removed setting of `par()`. Changed from using `lowess()` to using `loess()` and set better default values. Added descriptive text to help file. * `summary.mrClosed()`: Modified. Moved SE calculations into an internal function (see `iMRCSingleSE()`). -# FSA 0.4.16 Jul14 +# FSA 0.4.16 +* **Date:** Jul14 * `BluegillLM`: Modified. Added a seealso. * `residPlot()`: Modified. 
Changed the loess-related methods to use `loess()`, to put an approximate confidence band with the line, the line and band are "under" the points, the line is lighter. Put the horizontal reference line at zero under the points. Made `loess=TRUE` the default. * `iAddLoessLine()`: Modified. See `residPlot()`. @@ -1015,7 +1098,8 @@ * `SMBassWB`: Modified. Added a seealso. -# FSA 0.4.15 Jun14 +# FSA 0.4.15 +* **Date:** Jun14 * lots of roxygen2 Rd cleaning. * `addLoessLine()`: Deleted. Moved functionality to `iAddLoessLine()` and moved code to `residPlot()` file. * `addOutlierTestResults()`: Deleted. Moved functionality to `iAddOutlierTestResults()` and moved code to `residPlot()` file. @@ -1069,8 +1153,8 @@ * `typeoflm()`: Deleted. Moved functionality to `iTypeoflm()`. * `wsLitCheck()`: Deleted. Moved functionality to `iwsLitCheck()` and moved code to `wsVals()` file. -# FSA 0.4.14 Jun14 - +# FSA 0.4.14 +* **Date:** Jun14 * added tests (in `test_VonB2b.R`) to assure that group comparisons of von Bertalanffy parameters equal those in Kimura (1980) and `vblrt()` in `fishmethods`. * added importsFrom for `lmtest` for `lrt()`. Also used in testing (`test_VonB2b.R`). * `confint.nlsBoot()`: Modified. Modified the plotting to use `hist.formula()`, removed `par(mar=)` definitions, and added `err.col=` and `lwd.col=` to control the color and line width of the confidence interval line on the plot. @@ -1082,7 +1166,8 @@ * `vbModels()`: Modified. Added Weisberg parameterization. Changed `vbGallucciQuinn` to `vbGQ`. * `vbStarts()`: Modified. Added Weisberg parameterization. Added `vbGQ` abbreviation (synonymous with `vbGallucciQuinn`). Added an internal function for checking whether the starting values for K and Linf made sense. -# FSA 0.4.13 Jun14 +# FSA 0.4.13 +* **Date:** Jun14 * added testthat files for error checking of `chapmanPlot()`, `vbFuns()`, `vbStarts()`, and `walfordPlot()`. 
Added a testthat file for checking that the von Bertalanffy fitting using `vbFuns()` and `vbStarts()` matches other sources. * `ageBias()`: Modified. Deprecated `col.lab=` and `row.lab=` and replaced with `ref.lab=` and `nref.lab=`. Moved all functions that were internal to main functions to being internal to the package. In the process, I changed the names of the internal functions slightly, made explicit the argument passing, and added internal descriptions of the internal files. Changed several if else strings in the plot method to a `switch()`. @@ -1097,7 +1182,8 @@ * `vbStarts()`: Modified. Removed S3 functions so that `vbStarts()` has to use a formula. Added some checking related to the formula. Changed `tFrancis=` to `ages2use=`. Changed the Schnute method to use the ages in `ages2use=` rather than being hard-wired to use the minimum and maximum observed age. Both the Schnute and Francis methods will use the minimum and maximum observed ages if `ages2use=NULL`. Added a catch for if `ages2use=` are in descending order (should be in ascending order). Changed `Schnute` parameterization to use L3 instead of L2. * `walfordPlot()`: Modified. Removed S3 functions so that `vbStarts()` has to use a formula. Added some checking related to the formula. -# FSA 0.4.12 May14 +# FSA 0.4.12 +* **Date:** May14 * added Suggests for `testthat`, `fishmethods`, `FSAdata` for testing and `popbio` for an example that was made "interactive" from "dont run"(see below). * added testthat files for `ageBias()` and `agePrecision()`. @@ -1117,7 +1203,8 @@ * `view()`: Deleted. Moved to NCStats package. * `wrDataPrep()`: Modified. Changed Rd file for deletion of `view()`. -# FSA 0.4.11 May14 +# FSA 0.4.11 +* **Date:** May14 * Removed Roxygen directives in DESCRIPTION (with changes to roxygen2 4.0.1). 
* Changed `@S3method` and `@method` to `@export` in the following files according to changes in ROxygen2 as [described here](https://stackoverflow.com/questions/7198758/roxygen2-how-to-properly-document-s3-methods/7199577/), among several other places: `ageBias`, `agePrecision`, `bootCase`, `catchCurve`, `chapmanRobson`, `confint.nlsboot`, `depletion`, `dietOverlap`, `fitPlot`, `hist.formula`, `htest.nlsBoot`, `ks2d1`, `ks2d1p`, `ks2d2`, `ks2d2p`, `ksTest`, `lencat`, `mrClosed`, `mrOpen`, `plotBinResp`, `predict.nlsBoot`, `removal`, `residPlot`, `srStarts`, `Subset`, `Summarize`, `sumTable`, `vbStarts`, and `walfordChapmanPlot`. @@ -1129,7 +1216,8 @@ * `lwPredsComp()`: Modified. Streamlined the code (no changes to functionality). * `mrOpen()`: Modified. Streamlined the code (no changes to functionality). Removed all explicity partial matching options in `switch()`es as these were already caught with previous `match.arg()`s. -# FSA 0.4.10 May14 +# FSA 0.4.10 +* **Date:** May14 * Added Roxygen directives to DESCRIPTION. * Updated to Roxygen2 4.0.0 which modified several help files. @@ -1144,7 +1232,8 @@ * `psdDataPrep()`: Modified. Fixed error around `use.catnames=`. * `swvCounts()`: Modified. Fixed error in output. -# FSA 0.4.9 May14 +# FSA 0.4.9 +* **Date:** May14 * Removed nlme dependencies (with removal of `vbDataGen()`). * `ageComp()`: Deleted. Fully deprecated. Use `ageBias()` and `agePrecision()` instead. @@ -1161,12 +1250,14 @@ * `vbFuns()`: Modified. Remove link to `vbComp()`. * `VBGMlit()`: Deleted. Moved to FSAsim package. -# FSA 0.4.8 May14 +# FSA 0.4.8 +* **Date:** May14 * `ageBias()`: Modified. Added the ability to use multiple `what=` arguments with `c()`. Added `what="n"` to get the sample size on the age-agreement table. Added `nYpos=` to `plot()` to allow control of the position of the sample size values on the plot. Changed the order of the printing of results when `what="symmetry"` is used in `summary()`. 
The order more closely follows the "level of complexity" of the tests. Added unit test examples to the help file. * `agePrecision()`: Modified. Added the ability to use multiple `what=` arguments with `c()`. * `hndlMultWhat()`: Added. An internal file to help `ageBias()` and `agePrecision` handle multiple `what=` arguments. -# FSA 0.4.7 Apr14 +# FSA 0.4.7 +* **Date:** Apr14 * Removed all of the functions related to constructing and validating standard weight equations. These are now in the [FSAWs package](https://github.com/droglenc/FSAWs). This is the start of an effort to streamline the FSA package. * Removed importFrom quantreg (only used for standard weight methods). @@ -1182,7 +1273,8 @@ * `wsValidate()`: Removed. * `WalleyeGerowLW`: Removed. -# FSA 0.4.6 Apr14 +# FSA 0.4.6 +* **Date:** Apr14 * Changed to compiling under R 3.1.0 * Imported `stackpoly()` from plotrix for use in `ageKeyPlot()`. * Added concepts (that largely match those in the FSAdata package) to most of the data files. @@ -1193,7 +1285,8 @@ * `lencat()`: Modified. Added generic functions. `lencat.default()` accepts a vector as its first argument and returns a single vector. `lencat.formula()` accepts a formula as its first argument and the `data=` argument. The `lencat.formula()` is the same as the old `lencat()` and `lencat.default()` provides new functionality. Additionally, the default for `startcat=` is now `NULL` and a value for `startcat=` is found automatically (though a value can still be supplied by the user). The `use.catnames=` was changed to `use.names=`. Other changes were made to simplify the code. * `lenFreqExpand()`: Modified. Removed the `df=` and `cl=` arguments and replaced with `x=`, which is simply a vector of length measurements. Changed to `startcat=NULL` so that the starting category value can be determined automatically (or can still be set by the user). -# FSA 0.4.5 Apr14 +* **Date:** Apr14 * Converted to using github as a repository. 
* Changed NEWS to NEWS.md * Added ImportFrom for relax package (see below). @@ -1212,12 +1305,14 @@ * `plotBinResp()`: Modified. Added `yaxis1.ticks=` and `yaxis1.lbls=` arguments so that the user can control the tick-mark locations and labels for the left y-axis (the defaults are to show ticks every 0.1 units but only label 0, 0.5, and 1). Added `yaxis2.show=` argument to allow the user to "turn-off" the right y-axis (defaults to being on) which is labeled with the level labels. * `srSim()`: Added back from FSATeach (required adding ImportFrom for relax package). -# FSA 0.4.4 Apr14 +# FSA 0.4.4 +* **Date:** Apr14 * `ageKeyPrep()`: Added. * `agePrecision()`: Modified. Fixed the bug where the APE and CV were over-estimated in situations where the multiple ages agreed at an age=0 (thanks to Richard McBride for pointing out this error). * `wsLit`: Modified. Added Pursak chub information from Sulun et al. (2014). -# FSA 0.4.3 Mar14 +# FSA 0.4.3 +* **Date:** Mar14 * `ageBias()`: Added. Extracted the age-bias related material from `ageComp()`. Modified the code to remove unneeded code. From `ageComp()`, remove the `what=` argument related to differences and added a `difference=` argument. Also changed `what="bias.diff"` to `what="diff.bias"` to allow for a quicker partial matching (i.e. separate more from `what="bias"`). Major modifications to how the axis limits are created if none are provided. Modified where the sample size is shown on the age-bias plot. Added the `min.n.CI=` argument. Added an example using `WhitefishLC` to be consistent with `agePrecision()`. * `ageComp()`: Modified. Split into `ageBias()` and `agePrecision()`. Added a warning that this function is deprecated and will be removed in the future. * `ageKey()`: Modified. Fixed a bug that occurred when a data frame that already contained an LCat variable was provided. @@ -1258,7 +1353,8 @@ * `WhitefishLC`: Added (from FSAdata). * `wsLit`: Modified. 
Changed all species names to have both words capitalized so as to follow the latest AFS guidelines. -# FSA 0.4.2 Dec13 +# FSA 0.4.2 +* **Date:** Dec13 * Changed to compiling under R 3.0.2. * Removed dependency on reshape package (see changes for `emp()`, `gReshape()`, and `ssValidate()` below) and the relax, tcltk, and TeachingDemos packages (see changes for `catchCurveSim()`, `cohortSim()`, `growthModelSim()`, `leslieSim()`, `lwModelSim()`, `mrClosed1Sim()`, `simAgeBias()`, `simAges()`, `simLenFromAge()`, `simLenSelect()`, and `srSim()` below). @@ -1286,7 +1382,8 @@ * `vbStarts()`: Modified. Changed tFrancis argument to use only two ages. Changed the default for `meth.EV=` to "poly". Removed jittering and added a transparency to the plot. Removed the box around the legend and moved the legend to the "bottomright." Fixed a typo in the plot heading. * `wsValidate()`: Modified. Replaced use of `cast()` with `aggregate()`. -# FSA 0.4.1 Oct13 +# FSA 0.4.1 +* **Date:** Oct13 * Changed R dependency to >3.0.0 (because gplots package has that dependency). * Added importFrom for `cast()`, `is.formula()`, and `melt()` in reshape package. @@ -1306,7 +1403,8 @@ * `plotH()`: Deleted, moved to plotrix package. * `quad_dens()`: Added to FSAinternals (from `ks2d()`). -# FSA 0.4.0 Jun13 +# FSA 0.4.0 +* **Date:** Jun13 * Corrected all pointers to fishR vignettes (because of new webpage). * Removed importFrom color.scale from plotrix because of changes to `discharge()` and `wetPerim()`. * removed importFrom %nin% from Hmisc. See multiple changes because of this below. @@ -1360,7 +1458,8 @@ * `wsVal()`: Modified. A major modification to account for the major changes to `WSLit`. * `wsValidate()`: Removed use of %nin%. -# FSA 0.3.4 Jan13 +# FSA 0.3.4 +* **Date:** Jan13 * added special "fishR Vignette" sections with links to several help files. 
* `binCI()`: Modified so that result is a matrix rather than sometimes (when only @@ -1380,7 +1479,8 @@ one set of CIs were computed) being a vector. * `vbFuns()`: Modified slightly the messages if `msg=TRUE`. Added a message for the Wang2 model and corrected an error for the Somers2 model. * `view()`: Modified to remove the ability to print to a window (use method built into RStudio instead). Also generalized to use for both a matrix or a data.frame (eliminates some warning messages). -# FSA 0.3.3 21Dec12 +# FSA 0.3.3 +* **Date:** 21Dec12 * Added ImportFrom for `slider()` and `gslider()` from the relax package. Deleted the ImportFrom for `slider()` from the `TeachingDemos` package. These functions were the same but it was being deprecated from `TeachingDemos`. * General: added `call.=FALSE` to several `stop()`s and `warning()`s. * General: replaced `paste()` inside of several `cat()`s. @@ -1418,7 +1518,8 @@ one set of CIs were computed) being a vector. * `TroutDietSL`: Added for use with `dietOverlap()`. * `vbStarts()`: Modified by including a catch for negative starting values of K or starting values of Linf that are 50% smaller or larger than the observed maximum length in the data set. -# FSA 0.3.2 1Dec12 +# FSA 0.3.2 +* **Date:** 1Dec12 * Changed R dependency to >2.14.0. * Added a ImportsFrom for knitr (purl() in swvCode() added below). * Moved gdata to an ImportsFrom from Suggests. Needed for nobs() in ci.fp1() which is used in fitPlot.ONEWAY and drop.levels() used in the example in RuffeWs. @@ -1439,7 +1540,8 @@ one set of CIs were computed) being a vector. * `view()`: added from NCStats. * `wsVal(), wrAdd()`: added code to eliminate "global bindings" note when performing RCMD Check. Solutions came from Adrian Alexa's response to this question: https://groups.google.com/forum/?fromgroups=#!topic/cambridge-r-user-group/c7vf8o3QwDo -# FSA 0.3.1 25Nov12 +# FSA 0.3.1 +* **Date:** 25Nov12 * Switched to using the Project mode in RStudio. 
* Switched to using semantic versioning for the version number (which means that the hyphen before the last number has been replaced by a period). * Switched to using roxygen to construct help files. @@ -1486,7 +1588,8 @@ * `wsValidate()`: modified the classnames to "willis" from "WILLIS" and "empq" from "EMPQ". Also made minor modification because of class name change in FroeseWs() * `ycPlot()`: deleted (Weisberg's LGM is now out-dated). -# FSA 0.3-0 8-Nov-12 +# FSA 0.3-0 +* **Date:** 8-Nov-12 * Moved several functions from NCStats that are used quite often for fisheries analyses. Ultimately, I want to remove the dependency to NCStats. * Deleted an importFrom for gtools, created an internal odd() instead. * Added an importFrom for gplots, to get rich.colors() for chooseColors(). @@ -1514,7 +1617,8 @@ * `rsdPlot()`: modified to handle situations where substock fish are not present in the data. Thanks to Max Wolter for pointing out this issue. * `Subset()`: copied from NCStats (i.e., same function still in NCStats). -# FSA 0.2-8 21Jun12 +# FSA 0.2-8 +* **Date:** 21Jun12 * Switched to compiling under R version 2.14.1 (64-bit). * Changed license specification from "GPL version 2 or newer" to "GPL (>= 2)" to avoid warning on check. * Added a suggestion for gdata to fix warning with capHistConvert.rd (see below). @@ -1524,7 +1628,8 @@ * `simAgeBias()`: changed width to widths in layout() to avoid warning on check. * `simLenSelectM()`: changed width to widths in layout() to avoid warning on check. -# FSA 0.2-7 2Mar12 +# FSA 0.2-7 +* **Date:** 2Mar12 * `.onLoad()`: Modified. Moved the startup message into packageStartupMessage() in hopes of eliminating the warning when checking the package. * `catchCurveSim()`: Modified. Changed max.age= to 15 (from 10). Slightly changed the labels related to 'Z Steady' and 'No Steady'. 
* `chapmanRobson()`: Modified. Corrected a bug for when the ages2use= argument contains ages that are not found in the data. Thanks to Eric Berglund for finding this bug. @@ -1536,7 +1641,8 @@ * `wrVal()`: Modified. As described for wsVal(). * `wsVal()`: Modified. Removed the justcoef= argument. Added the ab= and comment= arguments. Also, removed the appended units names from the names attribute -- i.e., "int" rather than "int.E" or "int.mm". -# FSA 0.2-6 1Oct11 +# FSA 0.2-6 +* **Date:** 1Oct11 * Switched to compiling under R version 2.13.1 (32-bit). * Removed importFroms that were required for updateFSA(). * Removed splines package from imports list (not needed). @@ -1549,7 +1655,8 @@ * `vbFuns()`: Modified. Added 'Somers2' option to type= argument. * `vbStarts()`: Modified. Added 'Somers2' option to type= argument. -# FSA 0.2-5 19Aug11 +# FSA 0.2-5 +* **Date:** 19Aug11 * Modified description file to show my e-mail address. * Added `cnvrt.coords()` as an ImportFrom TeachingDemos. Needed for `simAgeBias()` and `simLenSelectM()`. @@ -1568,12 +1675,14 @@ * `simLenSelectP()`: Added. * `vbComp()`: Modified. Streamlined the code. Changed the t= argument to ages= to remove any possible confusion with t(). Removed the option to model over ages provided in the (previous) t= argument. Instead the ages= argument can be used to represent the maximum age to model to. The ages= argument can be a vector such that each simulation can have a different set of ages over which the model is evaluated. This allows for more realistic modeling. -# FSA 0.2-4 15Jun11 +# FSA 0.2-4 +* **Date:** 15Jun11 * Switched to compiling under R version 2.13.0. * `vbFuns()`: Modified. Modified Wang's formulas to be for length increments. Added a length increments version to Faben's method ("Fabens2"). 
-# FSA 0.2-3 18Apr11 +# FSA 0.2-3 +* **Date:** 18Apr11 * Updated citation file. * Added importFrom for tools and utils packages. @@ -1586,7 +1695,8 @@ * `updateFSA()`: Added. Had to add an importFrom from the tools package. * `vbFuns()`: Modified. Added Wang and Wang2 functions. -# FSA 0.2-2 3Mar11 +# FSA 0.2-2 +* **Date:** 3Mar11 * moved to compiling under 2.12.1 (32-bit) * changed dependency to >2.11.1 @@ -1594,14 +1704,16 @@ * `lencat()`: modified so that vname=NULL is the default. This will default to using "LCat" as the variable name (as in the previous version). However, modified the way the vname is appended to the new data frame so that if vname already exists in the data frame a new name will be used (vname plus some number). * `removal()`: added just.ests= argument and changed the ests part of the returned value to be a vector rather than a matrix. Both changes allowed for better use of lapply() for computing the removal estimates on more than one group. Changed from an error to a warning for situations where the method could not compute population estimates (i.e., because the population was not depleted). In addition, NAs are returned in situations where population estimates can not be made. An example of computing the removal estimate for more than one group was added to the .rd file. Thanks to Jon Bolland for asking the question that motivated these changes. -# FSA 0.2-1 31-Jan-11 +# FSA 0.2-1 +* **Date:** 31-Jan-11 * `catchCurve()`: Modified by adding a formula method. This required moving the original code into a default method and changing the age= argument to x=. * `lenFreqExpand()`: Modified by adding the additional= argument (which required modifying the total= argument and adding an error check for the situation where the total fish to assign lengths is not greater than the number of fish in the measured subsample). * `.onLoad()`: modified. 
Changed to include version number of loaded version. * `vbFuns()`: Modified by adding simple= argument. Added a 'Somers' seasonal growth oscillations model and 'Fabens' model for tag-recapture data. Also added, but did not check, a 'Laslett' 'double von Bertalanffy' model. * `vbStarts()`: Modified by setting a catch to return a single root for st0 or sL0 if the polynomial root found a double root. Thanks to Giacomo Tavecchia for identifying this error. Added a 'Somers' seasonal growth oscillations model. -# FSA 0.2-0 23-Sep-10 +# FSA 0.2-0 +* **Date:** 23-Sep-10 * `bcFuns()`: Added. Still needs to be thoroughly proofed. * `FSAsims()`: Modified to reflect srSim() change described below. * `listSpecies()`: Moved internal function out of being within RSDval() and WSval() and then added an argument for the data frame containing the species names. The hope was that this would correct the "no visible binding" warnings when performing RCMD check but it did not. @@ -1610,7 +1722,8 @@ * `vbDataGen()`: Modified use of minAge argument -- will now always back-calculate to age-1 but minAge denotes the minimum age-at-capture that will be modeled. Deleted use of cfAge variable in code. * `vbModels()`: Added. -# FSA 0.1-6 23-Aug-10 +# FSA 0.1-6 +* **Date:** 23-Aug-10 * completed changing naming convention to "camel" type -- e.g., `stockRecruitModels()` rather than `stock.recruit.models()`. * `ageComp()`: renamed from age.comp(). * `ageKey()`: renamed from age.key(). 
In this version, I am only changing the functions that I am working on. I will change the rest in the next version. * added an importFrom for `nlme` as `groupedData()` was needed for `vbDataGen()`. @@ -1674,10 +1788,12 @@ one set of CIs were computed) being a vector. * `vbStarts()`: Added. * `walfordPlot()`: Added. -# FSA 0.1-4 6Jun10 +# FSA 0.1-4 +* **Date:** 6Jun10 * `growmodel.sim()`: added an option to fit the "original" von Bertalanffy function. Also added more "mis-spelling" options to the other model names. -# FSA 0.1-2 17Dec09 +# FSA 0.1-2 +* **Date:** 17Dec09 * moved to compiling under 2.10.1. * `added a dependency to tcltk so that simulators would work properly upon load of FSA. @@ -1692,7 +1808,8 @@ one set of CIs were computed) being a vector. * `lencat()`: modified so that an "extra" last length category (with no fish in it) was not included when as.fact=TRUE and drop.levels=FALSE is used. This should correct the "problem" of an extra all-NA row in the age-length keys. * `tictactoe.add()`: added to the namespace export list. Changed order of items listed in the ci.type= argument to match that of bin.ci() from NCStats. -# FSA 0.1-1 15Apr09 +# FSA 0.1-1 +* **Date:** 15Apr09 * added a namespace * removed dependencies and changed to imports ... left plotrix and quantreg as dependencies (they do not have a namespaces). left reshape as a dependency because of it's dependency on plyr. * `.FirstLib()`: removed (changed to .onLoad() because of namespace). @@ -1706,11 +1823,13 @@ one set of CIs were computed) being a vector. * `validateWs()`: converted sign.slope variable in the Willis method to a factor to deal with situations where all results were positive or negative. * `wlgm.rd: fixed the summarization example (cast() did not work with Summarize(). 
-# FSA 0.0-14 20Dec08 +# FSA 0.0-14 +* **Date:** 20Dec08 * `age.comp()`: streamlined code (put bias and difference plots in same function, used grconvertY for show.n, used plotCI for range intervals, caught and corrected CI problems when n=1 or SD=0). No functionality difference, just improved code. * `growmodel.sim()`: modified by determining some of the slider values from the data when x= and y= arguments are not null. This makes the graph more useful for determining starting values in nls() modeling. -# FSA 0.0-13 6Dec08 +# FSA 0.0-13 +* **Date:** 6Dec08 * added a dependency to quantreg (for `rq()` in `emp()`). * added CITATION file. @@ -1727,7 +1846,8 @@ * `wlgm()`: major changes included moving some internal functions outside of wlgm(), adding the ability to use the data= argument, and adding the ability to fit weighted regressions on the summary statistics. Other minor changes were also made. Updated the .Rd file. * `WSval(),WRval()`: added a check for missing species name so that the user can just type WSval() to get the list of possible species names. Also added a check to see if WSlit was already loaded. -# FSA 0.0-12 15Jul08 +# FSA 0.0-12 +* **Date:** 15Jul08 * `.First.lib`: Added * `add.zerocatch()`: added this function to add zeros to catch records where a species of fish was not caught. * `limnoprofile.plot()`: added this function to simplify constructing plots of depth versus limnological measure with the depth decreasing from top to bottom. 
Added a mean= argument to allow use of means rather than quantiles. Modified to accept an object of class FROESE. -# FSA 0.0-11 15May08 +# FSA 0.0-11 +* **Date:** 15May08 * Moved to RForge.net. * changed to R2.7.0. * added a dependency to `Rcapture` (for the example in `caphist.convert()`). @@ -1748,7 +1869,8 @@ one set of CIs were computed) being a vector. * `plot.RLP()`: modified so that color palette with a gradient rather than only a solid color can be used for the populations. In addition, added order.pop= argument that will order the populations from smallest to largest predicted with in the first length interval. When used with the color gradients this will make it easier to see which populations cross over other populations. * `rlp()`: modified function so that the user can choose to use any-mm length intervals rather than having 10-mm hardwired. Modified output in list somewhat to more closely match the output of emp(). -# FSA 0.0-10 1May08 +# FSA 0.0-10 +* **Date:** 1May08 * `lencat()`: Modified by adding an as.fact= argument that allows the user to decide if the resulting variable should be returned as a factor variable or not. The default is set to return as a factor variable. This allows tables of the new variable to include zeros for levels of the new variable that contain no individuals. This makes some RSD/PSD (and likely age-length key) calculations simpler. Also added a drop.levels= argument to allow the user to drop unused levels if so desired. * `mr.closed()`: This function is a combination of the old mr.closed1() and mr.closed2(). It also allows the user to compute single census estimates with multiple sub-groups in the data (i.e., length- or age-classes). The function also allows the user to compute an overall population esitmate of multiple sub-groups are present and an overall SE if the incl.SE=TRUE is used. It also corrects the SE computations implemented in version 0.0-9. 
This change caused the construction of our internal functions -- mrc1, mrc2, ci.mrc1, and ci.mrc2. * `mr.closed1()`: removed this function. Use mr.closed() instead. @@ -1757,7 +1879,8 @@ one set of CIs were computed) being a vector. * `rcumsum()`: Added this function (from NCStats). * `RSDval()`: See PSDval description. -# FSA 0.0-9 unknown +# FSA 0.0-9 +* **Date:** unknown * `age.comp()`: Corrected SE calculation used to construct the CIs. Changed the CI plotting routine to use plotCI in plotrix package -- this puts lines rather than points on the ends of the CIs. Added a check for computing SDs and CIs for when n=1 or when all measurements are the same. This reduces (eliminates?) the number of warnings that are given. * `catch.curve()`: added na.rm=TRUE arguments to min() and max() in plot.CC(). Changed type= argument so that "params" is the default rather than "lm". This makes it more consistent with other simulation programs. * `cc.sim()`: Put in catch for situations where the CV for No and Z were equal to zero. Originally, the program attempted to computed a random number from a normal distribution with a standard deviation of zero. This corrected the problem of n* `lines appearing unless the CVs were greater than zero. @@ -1769,7 +1892,8 @@ one set of CIs were computed) being a vector. * `mr.closed1()`: Modified output list to include an estimate of the variance as described in Ricker(1975). * `summary.MRC1()`: Modified output so that (1) the given information is a little easier to read, (2) the population estimate is returned in a matrix, (3) the SE from Ricker(1975) can be included in the outputm, and (4) a label can be placed on row for the matrix output. The purpose of these changes was to allow the SE to be computed and to allow future functions to more flexibly use the output. -# FSA 0.0-8 unknown +# FSA 0.0-8 +* **Date:** unknown * changed some \items to \tabular in RD files. Changed most hard-wired quotes to \sQuote or \dQuote in RD files. 
Changed some text-based equations to more latex-based equations in \eqn or \deqn markups. This fixed the Latex compilation problems that I was having when using RCMD check. * `age.comp()`: Removed single-letter values from the what= argument. Will rely on partial matching. @@ -1787,7 +1911,8 @@ one set of CIs were computed) being a vector. * `vb.comp()`: Changed d argument to df. * `wlgm.RD()`: Added example code. Added some details. -# FSA 0.0-7 unknown +# FSA 0.0-7 +* **Date:** unknown * changed to compiling under R 2.6.1. * added FSA.R file that loads the required librarys. * now depends on `MASS` package because of the creation of the `boxcox.WLGM()` function and on the `plotrix` package for elements of `ycplot()`. @@ -1809,7 +1934,8 @@ one set of CIs were computed) being a vector. * `wlgm()`: Created a method with a number of generics (alias, anova, boxcox, coef, coefplot, confint, ycplot) for performing the Weisberg Linear Growth Model. * `ycplot()`: A new generic function for creating a year-class plot for the Weisberg Linear Growth Model analysis. -# FSA 0.0-6 unknown +# FSA 0.0-6 +* **Date:** unknown * `agebias.plot()`: deleted and replaced with agecomp and plot.AgeComp functions. * `agesunflower.plot()`: deleted and replaced with agecomp and plot.AgeComp functions. * `agecomp()`: a new function that, along with its extractor functions, combines all of the functionality of the old `age.tests()`, `age.symmetry()`, `agebias.plot()`, and `agesunflower.plot()`. Allows for a more seamless comparison of ageing reads. diff --git a/R/BluegillJL.R b/R/BluegillJL.R index c791980b..60280a0b 100644 --- a/R/BluegillJL.R +++ b/R/BluegillJL.R @@ -29,7 +29,7 @@ #' @concept Petersen #' @concept Capture History #' -#' @source From example 8.1 in Schneider, J.C. 1998. Lake fish population estimates by mark-and-recapture methods. Chapter 8 in Schneider, J.C. (ed.) 2000. Manual of fisheries survey methods II: with periodic updates. 
Michigan Department of Natural Resources, Fisheries Special Report 25, Ann Arbor. [Was (is?) from http://www.michigandnr.com/publications/pdfs/IFR/manual/SMII\%20Chapter08.pdf.] +#' @source From example 8.1 in Schneider, J.C. 1998. Lake fish population estimates by mark-and-recapture methods. Chapter 8 in Schneider, J.C. (ed.) 2000. Manual of fisheries survey methods II: with periodic updates. Michigan Department of Natural Resources, Fisheries Special Report 25, Ann Arbor. [Was (is?) from http://www.michigandnr.com/publications/pdfs/IFR/manual/SMII\%20Chapter08.pdf.] \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/BluegillJL.csv}{CSV file} #' #' @seealso Used in \code{\link{mrClosed}} examples. #' diff --git a/R/BrookTroutTH.R b/R/BrookTroutTH.R index 1221c759..33977377 100644 --- a/R/BrookTroutTH.R +++ b/R/BrookTroutTH.R @@ -22,7 +22,7 @@ #' @concept Mortality #' @concept Catch Curve #' -#' @source Quinlan, H.R. 1999. Biological Characteristics of Coaster Brook Trout at Isle Royale National Park, Michigan, 1996-98. U.S. Fish and Wildlife Service Ashland Fishery Resources Office report. November 1999. [Was (is?) from http://www.fws.gov/midwest/ashland/brook/biochar/biolchar.html.] +#' @source Quinlan, H.R. 1999. Biological Characteristics of Coaster Brook Trout at Isle Royale National Park, Michigan, 1996-98. U.S. Fish and Wildlife Service Ashland Fishery Resources Office report. November 1999. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/BrookTroutTH.csv}{CSV file} #' #' @seealso Used in \code{\link{catchCurve}} and \code{\link{chapmanRobson}} examples. #' diff --git a/R/CIDists.R b/R/CIDists.R index 715ea4f1..e9a58c6e 100644 --- a/R/CIDists.R +++ b/R/CIDists.R @@ -20,7 +20,7 @@ #' #' @return A #x2 matrix that contains the lower and upper confidence interval bounds as columns and, if \code{verbose=TRUE} \code{x}, \code{n}, and \code{x/n} . #' -#' @author Derek H. 
Ogle, \email{derek@@derekogle.com}, though this is largely based on \code{binom.exact}, \code{binom.wilson}, and \code{binom.approx} from the old epitools package. +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com}, though this is largely based on \code{binom.exact}, \code{binom.wilson}, and \code{binom.approx} from the old epitools package. #' #' @seealso See \code{\link{binom.test}}; \code{binconf} in \pkg{Hmisc}; and functions in \pkg{binom}. #' @@ -138,7 +138,7 @@ binCI <- function(x,n,conf.level=0.95,type=c("wilson","exact","asymptotic"), #' #' @return A #x2 matrix that contains the lower and upper confidence interval bounds as columns and, if \code{verbose=TRUE} \code{x}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com}, though this is largely based on \code{pois.exact}, \code{pois.daly}, \code{pois.byar}, and \code{pois.approx} from the old epitools package. +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com}, though this is largely based on \code{pois.exact}, \code{pois.daly}, \code{pois.byar}, and \code{pois.approx} from the old epitools package. #' #' @keywords htest #' @@ -270,7 +270,7 @@ poiCI <- function(x,conf.level=0.95,type=c("exact","daly","byar","asymptotic"), #' #' @return A 1x2 matrix that contains the lower and upper confidence interval bounds. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords htest #' diff --git a/R/ChinookArg.R b/R/ChinookArg.R index 78070f70..1fd0b72b 100644 --- a/R/ChinookArg.R +++ b/R/ChinookArg.R @@ -20,7 +20,7 @@ #' #' @concept Weight-Length #' -#' @source From Figure 4 in Soto, D., I. Arismendi, C. Di Prinzio, and F. Jara. 2007. Establishment of Chinook Salmon (\emph{Oncorhynchus tshawytscha}) in Pacific basins of southern South America and its potential ecosystem implications. Revista Chilena d Historia Natural, 80:81-98. [Was (is?) from http://www.scielo.cl/pdf/rchnat/v80n1/art07.pdf.] 
+#' @source From Figure 4 in Soto, D., I. Arismendi, C. Di Prinzio, and F. Jara. 2007. Establishment of Chinook Salmon (\emph{Oncorhynchus tshawytscha}) in Pacific basins of southern South America and its potential ecosystem implications. Revista Chilena d Historia Natural, 80:81-98. [Was (is?) from http://www.scielo.cl/pdf/rchnat/v80n1/art07.pdf.] \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/ChinookArg.csv}{CSV file} #' #' @seealso Used in \code{\link{lwCompPreds}} examples. #' diff --git a/R/CodNorwegian.R b/R/CodNorwegian.R index 835621b5..2146e1dc 100644 --- a/R/CodNorwegian.R +++ b/R/CodNorwegian.R @@ -22,7 +22,7 @@ #' @concept Stock-Recruit #' @concept Recruitment #' -#' @source From Garrod, D.J. 1967. Population dynamics of the Arcto-Norwegian Cod. Journal of the Fisheries Research Board of Canada, 24:145-190. +#' @source From Garrod, D.J. 1967. Population dynamics of the Arcto-Norwegian Cod. Journal of the Fisheries Research Board of Canada, 24:145-190. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/CodNorwegian.csv}{CSV file} #' #' @seealso Used in \code{\link{srStarts}}, \code{\link{srFuns}}, and \code{\link{nlsTracePlot}} examples. #' diff --git a/R/CutthroatAL.R b/R/CutthroatAL.R index 3363d3ad..8448c7cc 100644 --- a/R/CutthroatAL.R +++ b/R/CutthroatAL.R @@ -37,7 +37,7 @@ #' @concept Jolly-Seber #' @concept Capture History #' -#' @source From Appendix A.3 of Harding, R.D., C.L. Hoover, and R.P. Marshall. 2010. Abundance of Cutthroat Trout in Auke Lake, Southeast Alaska, in 2005 and 2006. Alaska Department of Fish and Game Fisheries Data Series No. 10-82. [Was (is?) from http://www.sf.adfg.state.ak.us/FedAidPDFs/FDS10-82.pdf.] +#' @source From Appendix A.3 of Harding, R.D., C.L. Hoover, and R.P. Marshall. 2010. Abundance of Cutthroat Trout in Auke Lake, Southeast Alaska, in 2005 and 2006. Alaska Department of Fish and Game Fisheries Data Series No. 10-82. [Was (is?) 
from http://www.sf.adfg.state.ak.us/FedAidPDFs/FDS10-82.pdf.] \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/CutthroatAL.csv}{CSV file} #' #' @note Entered into \dQuote{RMark} format (see \code{\link[FSAdata]{CutthroatALf}} in \pkg{FSAdata}) and then converted to individual format with \code{\link{capHistConvert}} #' diff --git a/R/Ecoli.R b/R/Ecoli.R index b8bd2b12..c6272506 100644 --- a/R/Ecoli.R +++ b/R/Ecoli.R @@ -21,7 +21,7 @@ #' @concept Nonlinear Model #' @concept Other #' -#' @source McKendrick, A.G. and M. Kesava Pai. 1911. The Rate of Multiplication of Micro-Organisms: a Mathematical Study. Proceedings of the Royal Society of Edinburgh. 31:649-655. +#' @source McKendrick, A.G. and M. Kesava Pai. 1911. The Rate of Multiplication of Micro-Organisms: a Mathematical Study. Proceedings of the Royal Society of Edinburgh. 31:649-655. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/Ecoli.csv}{CSV file} #' #' @keywords datasets NULL diff --git a/R/FSA.R b/R/FSA.R index 3742768c..08c679c9 100644 --- a/R/FSA.R +++ b/R/FSA.R @@ -2,7 +2,7 @@ #' #' @description Functions to support basic fisheries stock assessment methods. #' -#' @details Functions from this package can be used to perform a variety of basic fisheries stock assessment methods. Detailed descriptions for most functions are available in the \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analysis with R} book (Ogle 2016). Vignettes for the boxed examples in the \dQuote{Analysis and Interpretation of Freshwater Fisheries Data} book can be viewed with \code{fishR("AIFFD")}. +#' @details Functions from this package can be used to perform a variety of basic fisheries stock assessment methods. Detailed descriptions for most functions are available in the Introductory Fisheries Analysis with R book (Ogle 2016) (see \code{fishR("IFAR")}). 
Vignettes for the boxed examples in the \dQuote{Analysis and Interpretation of Freshwater Fisheries Data} book can be viewed with \code{fishR("AIFFD")}. #' #' Questions, comments, or suggestions should be given on the \href{https://github.com/fishR-Core-Team/FSA/issues/}{GitHub FSA Issues page}. #' @@ -13,10 +13,10 @@ #' \item The \href{https://github.com/droglenc/FSAWs/}{FSAWs package} contains functions for developing and validating standard weight equations. #' } #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' @docType package #' #' @name FSA #' -NULL +NULL diff --git a/R/FSAUtils.R b/R/FSAUtils.R index 6360815f..6d981127 100644 --- a/R/FSAUtils.R +++ b/R/FSAUtils.R @@ -7,7 +7,7 @@ #' #' @return A single string with the first letter of the first or all words capitalized. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords manip #' @@ -85,7 +85,7 @@ iCapFirst<- function(x,which=c("all","first")) { #' #' @return A vector of hexadecimal strings of the form "#rrggbbaa" as would be returned by \code{\link[grDevices]{rgb}}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{\link[grDevices]{col2rgb}} for similar functionality. #' @@ -117,7 +117,7 @@ col2rgbt <- function(col,transp=1) { #' #' @return A numeric vector. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. 
Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords manip #' @@ -154,14 +154,14 @@ fact2num <- function(object) { #' @title Opens web pages associated with the fishR website. #' -#' @description Opens web pages associated with the \href{https://derekogle.com/fishR/}{fishR website} in a browser. The user can open the main page or choose a specific page to open. +#' @description Opens web pages associated with the \href{https://fishr-core-team.github.io/fishR/}{fishR website} in a browser. The user can open the main page or choose a specific page to open. #' #' @param where A string that indicates a particular page on the fishR website to open. #' @param open A logical that indicates whether the webpage should be opened in the default browser. Defaults to \code{TRUE}; \code{FALSE} is used for unit testing. #' #' @return None, but a webpage will be opened in the default browser. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords misc #' @@ -169,26 +169,28 @@ fact2num <- function(object) { #' \dontrun{ #' ## Opens an external webpage ... only run interactively #' fishR() # home page -#' fishR("IFAR") # Introduction to Fisheries Analysis with R page -#' fishR("general") # examples page +#' fishR("posts") # blog posts (some examples) page #' fishR("books") # examples page +#' fishR("IFAR") # Introduction to Fisheries Analysis with R page #' fishR("AIFFD") # Analysis & Interpretation of Freshw. Fisher. 
Data page -#' fishR("posts") # blog posts (some examples) page +#' fishR("packages") # list of r-related fisheries packages +#' fishR("data") # list of fisheries data sets #' } #' #' @export -fishR <- function(where=c("home","IFAR","general","books", - "AIFFD","posts","news"), +fishR <- function(where=c("home","posts","books","IFAR","AIFFD", + "packages","data"), open=TRUE) { where <- match.arg(where) - tmp <- "https://derekogle.com/" + tmp <- "https://fishr-core-team.github.io/fishR/" switch(where, - home= { tmp <- paste0(tmp,"fishR") }, - IFAR= { tmp <- paste0(tmp,"IFAR") }, - general={ tmp <- paste0(tmp,"fishR/examples") }, - books= { tmp <- paste0(tmp,"fishR/examples") }, - AIFFD= { tmp <- paste0(tmp,"aiffd2007") }, - posts=,news= { tmp <- paste0(tmp,"fishR/blog") } + home= { tmp <- paste0(tmp,"") }, + posts= { tmp <- paste0(tmp,"blog/") }, + books= { tmp <- paste0(tmp,"pages/books.html") }, + IFAR= { tmp <- paste0(tmp,"pages/books.html#introductory-fisheries-analyses-with-r") }, + AIFFD= { tmp <- paste0(tmp,"pages/books.html#analysis-and-interpretation-of-freshwater-fisheries-data-i") }, + packages= { tmp <- paste0(tmp,"pages/packages.html") }, + data= { tmp <- paste0(tmp,"pages/data_fishR_alpha.html")} ) if (open) utils::browseURL(tmp) invisible(tmp) @@ -206,7 +208,7 @@ fishR <- function(where=c("home","IFAR","general","books", #' #' @return A matrix or data.frame with 2*n rows. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @note If \code{n} is larger than the number of rows in \code{x} then all of \code{x} is displayed. #' @@ -277,7 +279,7 @@ headtail <- function(x,n=3L,which=NULL,addrownums=TRUE,...) { #' #' @return A vector or matrix of lagged ratios. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. 
Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso \code{diff} #' @@ -330,7 +332,7 @@ lagratio <- function(x,lag=1L,recursion=1L,differences=recursion, #' #' @return A numeric value that is the correction factor according to Sprugel (1983). #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @references Sprugel, D.G. 1983. Correcting for bias in log-transformed allometric equations. Ecology 64:209-210. #' @@ -380,7 +382,7 @@ logbtcf <- function(obj,base=exp(1)) { #' #' @return A logical vector of the same length as x. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords manip #' @@ -429,7 +431,7 @@ iOddEven <- function(x,checkval) { #' #' @return A single numeric that is the percentage of values in \code{x} that meet the criterion in \code{dir} relative to \code{val}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords misc #' @@ -492,7 +494,7 @@ perc <- function(x,val,dir=c("geq","gt","leq","lt"),na.rm=TRUE, #' #' @return A matrix or data.frame with n rows. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @author A. Powell Wheeler, \email{powell.wheeler@@gmail.com} #' @@ -561,7 +563,7 @@ peek <- function(x,n=20L,which=NULL,addrownums=TRUE) { #' #' @return A numeric vector that contains the prior-to or reverse cumulative sums. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso \code{\link{cumsum}}. #' @@ -697,7 +699,7 @@ rSquared.lm <- function(object,digits=getOption("digits"), #' #' @return A single logical that indicates which rows of \code{df} to keep such that no consecutive rows (for the columns used) will be repeated. #' -#' @author Derek H. 
Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords manip #' @@ -768,7 +770,7 @@ repeatedRows2Keep <- function(df,cols2use=NULL,cols2ignore=NULL, #' #' @return A single numeric that is the standard error of the mean of \code{x}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{se} in \pkg{sciplot} for similar functionality. #' @@ -807,7 +809,7 @@ se <- function (x,na.rm=TRUE) { #' #' @seealso See \code{\link[plotrix]{valid.n}} in \pkg{plotrix} and \code{nobs} in \pkg{gdata} for similar functionality. See \code{\link{is.na}} for finding the missing values. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 2-Basic Data Manipulations. #' @@ -911,4 +913,4 @@ iChk4Geos <- function(x,na.rm,zneg.rm) { x <- x[x>0] } x -} \ No newline at end of file +} diff --git a/R/Mirex.R b/R/Mirex.R index 7e02fe6f..a24cd7ba 100644 --- a/R/Mirex.R +++ b/R/Mirex.R @@ -25,7 +25,7 @@ #' @concept Linear Models #' @concept Other #' -#' @source From (actual data) Makarewicz, J.C., E.Damaske, T.W. Lewis, and M. Merner. 2003. Trend analysis reveals a recent reduction in mirex concentrations in Coho (\emph{Oncorhynchus kisutch}) and Chinook (\emph{O. tshawytscha}) Salmon from Lake Ontario. Environmental Science and Technology, 37:1521-1527. +#' @source From (actual data) Makarewicz, J.C., E.Damaske, T.W. Lewis, and M. Merner. 2003. Trend analysis reveals a recent reduction in mirex concentrations in Coho (\emph{Oncorhynchus kisutch}) and Chinook (\emph{O. tshawytscha}) Salmon from Lake Ontario. Environmental Science and Technology, 37:1521-1527. 
\href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/Mirex.csv}{CSV file} #' #' @keywords datasets #' diff --git a/R/PSDlit.R b/R/PSDlit.R index f7e3c3b9..d5693b79 100644 --- a/R/PSDlit.R +++ b/R/PSDlit.R @@ -37,7 +37,7 @@ #' #' @seealso See \code{\link{psdVal}}, \code{\link{psdCalc}}, \code{\link{psdPlot}}, \code{\link{psdAdd}}, and \code{\link{tictactoe}} for related functionality. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' @source Original summary table from Dr. Michael Hansen, University of Wisconsin-Stevens Point. Additional species have been added by the package author from the literature. #' diff --git a/R/PikeNY.R b/R/PikeNY.R index 7c1bde5c..74d544b3 100644 --- a/R/PikeNY.R +++ b/R/PikeNY.R @@ -30,7 +30,7 @@ #' @concept Capture-Recapture #' @concept Schnabel #' -#' @source New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). +#' @source New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/PikeNY.csv}{CSV file} #' #' @seealso Used in \code{\link{mrClosed}} examples. Also see \code{\link{PikeNYPartial1}}. 
#' diff --git a/R/PikeNYPartial1.R b/R/PikeNYPartial1.R index ebc5bb56..f77c6405 100644 --- a/R/PikeNYPartial1.R +++ b/R/PikeNYPartial1.R @@ -33,7 +33,7 @@ #' @concept Schnabel #' @concept Capture History #' -#' @source Summary values taken from Table C-1 of New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). +#' @source Summary values taken from Table C-1 of New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/PikeNYPartial1.csv}{CSV file} #' #' @seealso Used in \code{\link{capHistSum}} and \code{\link{mrClosed}} examples. Also see \code{\link{PikeNY}}. #' diff --git a/R/SMBassLS.R b/R/SMBassLS.R index c05895de..3efc9ce3 100644 --- a/R/SMBassLS.R +++ b/R/SMBassLS.R @@ -30,7 +30,7 @@ #' @concept DeLury #' @concept Catchability #' -#' @source From Omand, D.N. 1951. A study of populations of fish based on catch-effort statistics. Journal of Wildlife Management, 15:88-98. +#' @source From Omand, D.N. 1951. A study of populations of fish based on catch-effort statistics. Journal of Wildlife Management, 15:88-98. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/SMBassLS.csv}{CSV file} #' #' @seealso Used in \code{\link{depletion}} examples. #' diff --git a/R/SMBassWB.R b/R/SMBassWB.R index 43dff147..346d32b5 100644 --- a/R/SMBassWB.R +++ b/R/SMBassWB.R @@ -44,7 +44,7 @@ #' @concept Weisberg LGM #' @concept Back-Calculation #' -#' @source Data from the linear growth modeling software distributed in support of Weisberg, S. 1993. Using hard-part increment data to estimate age and environmental effects. 
Canadian Journal of Fisheries and Aquatic Sciences 50:1229-1237. +#' @source Data from the linear growth modeling software distributed in support of Weisberg, S. 1993. Using hard-part increment data to estimate age and environmental effects. Canadian Journal of Fisheries and Aquatic Sciences 50:1229-1237. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/SMBassWB.csv}{CSV file} #' #' @seealso Used in \code{\link{capHistSum}} and \code{\link{mrClosed}} examples. Also see \code{wblake} from \pkg{alr4} for the same dataset with only the \code{agecap}, \code{lencap}, and \code{radcap} variables. #' @keywords datasets diff --git a/R/SpotVA1.R b/R/SpotVA1.R index ccc2ba02..df32e57d 100644 --- a/R/SpotVA1.R +++ b/R/SpotVA1.R @@ -25,7 +25,7 @@ #' #' @seealso Used in \code{\link{vbFuns}}, \code{\link{vbStarts}}, and \code{\link{nlsTracePlot}} examples. Also see \code{\link[FSAdata]{SpotVA2}} in \pkg{FSAdata} for related data. #' -#' @source Extracted from Table 1 in Chapter 8 (Spot) of the VMRC Final Report on Finfish Ageing, 2002 by the Center for Quantitative Fisheries Ecology at Old Dominion University. +#' @source Extracted from Table 1 in Chapter 8 (Spot) of the VMRC Final Report on Finfish Ageing, 2002 by the Center for Quantitative Fisheries Ecology at Old Dominion University. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/SpotVA1.csv}{CSV file} #' #' @keywords datasets #' diff --git a/R/Summarize.R b/R/Summarize.R index 2898d6ac..9a14ff18 100644 --- a/R/Summarize.R +++ b/R/Summarize.R @@ -27,7 +27,7 @@ #' #' @return A named vector or data frame (when a quantitative variable is separated by one or two factor variables) of summary statistics for numeric data. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{\link[base]{summary}} for related one dimensional functionality. 
See \code{\link[base]{tapply}}, \code{summaryBy} in \pkg{doBy}, \code{\link[psych]{describe}} in \pkg{psych}, \code{describe} in \pkg{prettyR}, and \code{basicStats} in \pkg{fBasics} for similar \dQuote{by} functionality. #' @@ -232,4 +232,4 @@ iSummarizeQf <- function(tmp,digits,na.rm,exclude,nvalid,percZero) { } ## Return the result res -} \ No newline at end of file +} diff --git a/R/WR79.R b/R/WR79.R index f419c02f..3f10dde7 100644 --- a/R/WR79.R +++ b/R/WR79.R @@ -22,7 +22,7 @@ #' #' @concept Age-Length Key #' -#' @source Simulated from Table 2A in Westerheim, S.J. and W.E. Ricker. 1979. Bias in using age-length key to estimate age-frequency distributions. Journal of the Fisheries Research Board of Canada. 35:184-189. +#' @source Simulated from Table 2A in Westerheim, S.J. and W.E. Ricker. 1979. Bias in using age-length key to estimate age-frequency distributions. Journal of the Fisheries Research Board of Canada. 35:184-189. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/WR79.csv}{CSV file} #' #' @keywords datasets #' diff --git a/R/WSlit.R b/R/WSlit.R index 3964b37f..6255517f 100644 --- a/R/WSlit.R +++ b/R/WSlit.R @@ -40,7 +40,7 @@ #' #' @section IFAR Chapter: 8-Condition. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' @source Most of these equations can be found in Neumann, R.M., C.S. Guy, and D.W. Willis. 2012. Length, Weight, and Associated Indices. Chapter 14 in Zale, A.V., D.L. Parrish, and T.M. Sutton, editors. Fisheries Techniques. American Fisheries Society, Bethesda, MD. 
#' diff --git a/R/WhitefishLC.R b/R/WhitefishLC.R index 1b6a1044..f2b62dca 100644 --- a/R/WhitefishLC.R +++ b/R/WhitefishLC.R @@ -35,7 +35,7 @@ #' @concept Ageing Error #' @concept Age Comparisons #' -#' @source Data from Herbst, S.J. and J.E. Marsden. 2011. Comparison of precision and bias of scale, fin ray, and otolith age estimates for lake whitefish (\emph{Coregonus clupeaformis}) in Lake Champlain. Journal of Great Lakes Research. 37:386-389. Contributed by Seth Herbst. \bold{Do not use for other than educational purposes without permission from the author.} [Was (is?) from http://www.uvm.edu/rsenr/emarsden/documents/Herbst\%20and\%20Marsden\%20whitefish\%20age\%20structure\%20comparison.pdf.] +#' @source Data from Herbst, S.J. and J.E. Marsden. 2011. Comparison of precision and bias of scale, fin ray, and otolith age estimates for lake whitefish (\emph{Coregonus clupeaformis}) in Lake Champlain. Journal of Great Lakes Research. 37:386-389. Contributed by Seth Herbst. \bold{Do not use for other than educational purposes without permission from the author.} \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/WhitefishLC.csv}{CSV file} #' #' @seealso Used in \code{\link{ageBias}} and \code{\link{agePrecision}} examples. #' diff --git a/R/addZeroCatch.R b/R/addZeroCatch.R index 594d48c3..e20fac2e 100644 --- a/R/addZeroCatch.R +++ b/R/addZeroCatch.R @@ -19,13 +19,13 @@ #' @param na.rm A logical that indicates if rows where \code{specvar} that are \code{NA} should be removed after adding the zeros. See details. #' @return A data.frame with the same structure as \code{df} but with rows of zero observation data appended. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 2-Basic Data Manipulations #' #' @seealso \code{complete} in \pkg{tidyr} package. #' -#' @references Ogle, D.H. 2016. 
\href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' @keywords manip #' @@ -221,4 +221,4 @@ addZeroCatch <- function(df,eventvar,specvar,zerovar,na.rm=TRUE) { ## puts the order of the variables back to the original order df[,dfnames] } -} \ No newline at end of file +} diff --git a/R/ageComparisons.R b/R/ageComparisons.R index 026d9adf..a4b82705 100644 --- a/R/ageComparisons.R +++ b/R/ageComparisons.R @@ -19,10 +19,10 @@ #' \item R Number of age estimates given in \code{formula}. #' \item PercAgree The percentage of fish for which all age estimates perfectly agree. #' \item ASD The average (across all fish) standard deviation of ages within a fish. -#' \item ACV The average (across all fish) coefficient of variation of ages within a fish using the \bold{mean} as the divisor. See the \href{http://derekogle.com/IFAR/}{IFAR chapter} for calculation details. +#' \item ACV The average (across all fish) coefficient of variation of ages within a fish using the \bold{mean} as the divisor. See the \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{IFAR chapter} for calculation details. #' \item ACV2 The average (across all fish) coefficient of variation of ages within a fish using the \bold{median} as the divisor. This will only be shown if R>2 or \code{show.prec2=TRUE}. #' \item AAD The average (across all fish) absolute deviation of ages within a fish. -#' \item APE The average (across all fish) percent error of ages within a fish using the \bold{mean} as the divisor. See the \href{http://derekogle.com/IFAR/}{IFAR chapter} for calculation details. 
+#' \item APE The average (across all fish) percent error of ages within a fish using the \bold{mean} as the divisor. See the \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{IFAR chapter} for calculation details. #' \item APE2 The average (across all fish) percent error of ages within a fish using the \bold{median} as the divisor. This will only be shown if R>2 or \code{show.prec2=TRUE}. #' \item AD The average (across all fish) index of precision (D). #' } @@ -57,13 +57,13 @@ #' #' @section Testing: Tested all precision results against published results in Herbst and Marsden (2011) for the \code{\link{WhitefishLC}} data and the results for the \code{\link[FSAdata]{AlewifeLH}} data set from \pkg{FSAdata} against results from the online resource at http://www.nefsc.noaa.gov/fbp/age-prec/. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 4-Age Comparisons. #' #' @seealso See \code{\link{ageBias}} for computation of the full age agreement table, along with tests and plots of age bias. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Beamish, R.J. and D.A. Fournier. 1981. A method for comparing the precision of a set of age determinations. Canadian Journal of Fisheries and Aquatic Sciences 38:982-983. [Was (is?) available from http://www.pac.dfo-mpo.gc.ca/science/people-gens/beamish/PDF_files/compareagecjfas1981.pdf.] 
#' @@ -414,13 +414,13 @@ summary.agePrec <- function(object,what=c("precision","difference", #' #' @section Testing: Tested all symmetry test results against results in Evans and Hoenig (2008), the McNemar and Evans-Hoenig results against results from \code{\link[fishmethods]{compare2}} in \pkg{fishmethods}, and all results using the \code{\link[FSAdata]{AlewifeLH}} data set from \pkg{FSAdata} against results from the online resource at http://www.nefsc.noaa.gov/fbp/age-prec/. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{\link{agePrecision}} for measures of precision between pairs of age estimates. See \code{\link[fishmethods]{compare2}} in \pkg{fishmethods} for similar functionality. See \code{\link{plotAB}} for a more traditional age-bias plot. #' #' @section IFAR Chapter: 4-Age Comparisons. \bold{Note that \code{plot} has changed since IFAR was published. Some of the original functionality is in \code{\link{plotAB}}.} #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Campana, S.E., M.C. Annand, and J.I. McMillan. 1995. Graphical and statistical methods for determining the consistency of age determinations. Transactions of the American Fisheries Society 124:131-138. [Was (is?) available from http://www.bio.gc.ca/otoliths/documents/Campana\%20et\%20al\%201995\%20TAFS.pdf.] #' @@ -1189,7 +1189,7 @@ iDiffBA <- function(x,xlab,ylab,xlim,ylim,yaxt,xaxt, #' #' @return Nothing, but see details for a description of the plot that is produced. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. 
Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{\link{ageBias}} and its plot method for what I consider a better age-bias plot; \code{\link{agePrecision}} for measures of precision between pairs of age estimates; and \code{\link[fishmethods]{compare2}} in \pkg{fishmethods} for similar functionality. #' diff --git a/R/alkIndivAge.R b/R/alkIndivAge.R index 5e84fc31..6cf24e7f 100644 --- a/R/alkIndivAge.R +++ b/R/alkIndivAge.R @@ -13,7 +13,7 @@ #' @param key A numeric matrix that contains the age-length key. The format of this matrix is important. See details. #' @param formula A formula of the form \code{age~length} where \code{age} generically represents the variable that will contain the estimated ages once the key is applied (i.e., should currently contain no values) and \code{length} generically represents the variable that contains the known length measurements. If only \code{~length} is used, then a new variable called \dQuote{age} will be created in the resulting data frame. #' @param data A data.frame that minimally contains the length measurements and possibly contains a variable that will receive the age assignments as given in \code{formula}. -#' @param type A string that indicates whether to use the semi-random (\code{type="SR"}, default) or completely-random (\code{type="CR"}) methods for assigning ages to individual fish. See the \href{http://derekogle.com/IFAR/}{IFAR chapter} for more details. +#' @param type A string that indicates whether to use the semi-random (\code{type="SR"}, default) or completely-random (\code{type="CR"}) methods for assigning ages to individual fish. See the \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{IFAR chapter} for more details. #' @param breaks A numeric vector of lower values that define the length intervals. See details. #' @param seed A single numeric that is given to \code{set.seed} to set the random seed. This allows repeatability of results. 
#' @@ -21,13 +21,13 @@ #' #' @section Testing: The \code{type="SR"} method worked perfectly on a small example. The \code{type="SR"} method provides results that reasonably approximate the results from \code{\link{alkAgeDist}} and \code{\link{alkMeanVar}}, which suggests that the age assessments are reasonable. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com}. This is largely an R version of the SAS code provided by Isermann and Knight (2005). +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com}. This is largely an R version of the SAS code provided by Isermann and Knight (2005). #' #' @section IFAR Chapter: 5-Age-Length Key. #' #' @seealso See \code{\link{alkAgeDist}} and \code{\link{alkMeanVar}} for alternative methods to derived age distributions and mean (and SD) values for each age. See \code{\link{alkPlot}} for methods to visualize age-length keys. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Isermann, D.A. and C.T. Knight. 2005. A computer program for age-length keys incorporating age assignment to individual fish. North American Journal of Fisheries Management, 25:1153-1160. [Was (is?) from http://www.tandfonline.com/doi/abs/10.1577/M04-130.1.] #' @@ -215,4 +215,4 @@ iAgeKey.CR <- function(key,age.cats,data,ca) { data[i,ca] <- sample(age.cats,1,prob=age.prob) } data -} \ No newline at end of file +} diff --git a/R/alkPlot.R b/R/alkPlot.R index 9d0c243d..a6ae65ea 100644 --- a/R/alkPlot.R +++ b/R/alkPlot.R @@ -29,13 +29,13 @@ #' #' @return None, but a plot is constructed. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. 
Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 5-Age-Length Key. #' #' @seealso See \code{\link{alkIndivAge}} for using an age-length key to assign ages to individual fish. See \code{\link[grDevices]{hcl.colors}} for a simple way to choose other colors. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' @keywords plot #' diff --git a/R/alkSummaries.R b/R/alkSummaries.R index e462df3c..cacad224 100644 --- a/R/alkSummaries.R +++ b/R/alkSummaries.R @@ -21,13 +21,13 @@ #' #' @section Testing: The results from this function perfectly match the results in Table 8.4 (left) of Quinn and Deriso (1999) using \code{\link[FSAdata]{SnapperHG2}} from \pkg{FSAdata}. The results also perfectly match the results from using \code{\link[fishmethods]{alkprop}} in \pkg{fishmethods}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 5-Age-Length Key. #' #' @seealso See \code{\link{alkIndivAge}} and related functions for a completely different methodology. See \code{\link[fishmethods]{alkprop}} from \pkg{fishmethods} for the exact same methodology but with a different format for the inputs. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Lai, H.-L. 1987. Optimum allocation for estimating age composition using age-length key. Fishery Bulletin, 85:179-185. 
#' @@ -121,13 +121,13 @@ iALKAgeProp <- function(p_jgi,l_i,n_i,N) { #' #' @section Testing: The results of these functions have not yet been rigorously tested. The Bettoli and Miranda (2001) results appear, at least, approximately correct when compared to the results from \code{\link{alkIndivAge}}. The Quinn and Deriso (1999) results appear at least approximately correct for the mean values, but do not appear to be correct for the SE values. Thus, a note is returned with the Quinn and Deriso (1999) results that the SE should not be trusted. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 5-Age-Length Key. #' #' @seealso See \code{\link{alkIndivAge}} and related functions for a completely different methodology. See \code{\link{alkAgeDist}} for a related method of determining the proportion of fish at each age. See the \pkg{ALKr} package. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Bettoli, P. W. and Miranda, L. E. 2001. A cautionary note about estimating mean length at age with subsampled data. North American Journal of Fisheries Management, 21:425-428. 
#' @@ -269,4 +269,4 @@ iALKMean.QD <- function(key,formula,data,N_i) { rownames(res) <- NULL message("The 'se' values should not be trusted!") res -} \ No newline at end of file +} diff --git a/R/bootstrap.R b/R/bootstrap.R index 393f5300..d732a63f 100644 --- a/R/bootstrap.R +++ b/R/bootstrap.R @@ -39,7 +39,7 @@ #' #' \code{predict} returns a matrix with one row and three columns, with the first column holding the predicted value (i.e., the median prediction) and the last two columns holding the approximate confidence interval. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso \code{\link[car]{Boot}} in \pkg{car}. #' @@ -183,7 +183,7 @@ hist.boot <- function(x,same.ylim=TRUE,ymax=NULL, #' #' \code{predict} returns a matrix with one row and three columns, with the first column holding the predicted value (i.e., the median prediction) and the last two columns holding the approximate confidence interval. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso \code{\link[car]{Boot}} and related methods in \pkg{car} and \code{summary.\link[nlstools]{nlsBoot}} in \pkg{nlstools}. #' diff --git a/R/capHistConvert.R b/R/capHistConvert.R index 419eb3d1..cb22f144 100644 --- a/R/capHistConvert.R +++ b/R/capHistConvert.R @@ -90,13 +90,13 @@ #' #' @section Warning: \code{capHistConvert} may give unwanted results if the data are \code{in.type="event"} but there are unused levels for the variable, as would result if the data.frame had been subsetted on the event variable. The unwanted results can be corrected by using \code{droplevels} before \code{capHistConvert}. See the last example for an example. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 9-Abundance from Capture-Recapture Data. 
#' #' @seealso See \code{\link{capHistSum}} to summarize \dQuote{individual} capture histories into a format usable in \code{\link{mrClosed}} and \code{\link{mrOpen}}. Also see \pkg{Rcapture}, \code{RMark}, or \pkg{marked} packages for handling more complex analyses. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' @keywords manip #' diff --git a/R/capHistSum.R b/R/capHistSum.R index 4862defa..3936cd97 100644 --- a/R/capHistSum.R +++ b/R/capHistSum.R @@ -35,13 +35,13 @@ #' \item \code{m.array} A matrix that contains the the so-called \dQuote{m-array}. The first column contains the number of fish captured on the ith event. The columns labeled with \dQuote{cX} prefix show the number of fish originally captured in the ith row that were captured in the Xth event. The last column shows the number of fish originally captured in the ith row that were never recaptured. #' } #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 9-Abundance from Capture-Recapture Data. #' #' @seealso See \code{\link[Rcapture]{descriptive}} in \pkg{Rcapture} for \code{m.array} and some of the same values in \code{sum}. See \code{\link{capHistConvert}} for a descriptions of capture history data file formats and how to convert between them. See \code{\link{mrClosed}} and \code{\link{mrOpen}} for how to estimate abundance from the summarized capture history information. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. 
\href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Baillargeon, S. and Rivest, L.-P. (2007). Rcapture: Loglinear models for capture-recapture in R. Journal of Statistical Software, 19(5):1-31. #' diff --git a/R/catchCurve.R b/R/catchCurve.R index 09f024a8..8c29c5ba 100644 --- a/R/catchCurve.R +++ b/R/catchCurve.R @@ -42,13 +42,13 @@ #' #' @section Testing: Tested the results of catch curve, both unweighted and weighted, against the results in Miranda and Bettoli (2007). Results for Z and the SE of Z matched perfectly. Tested the unweighted results against the results from \code{agesurv} in \pkg{fishmethods} using the \code{rockbass} data.frame in \pkg{fishmethods}. Results for Z and the SE of Z matched perfectly. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 11-Mortality. #' #' @seealso See \code{\link[fishmethods]{agesurv}} in \pkg{fishmethods} for similar functionality. See \code{\link{chapmanRobson}} and \code{\link[fishmethods]{agesurvcl}} in \pkg{fishmethods} for alternative methods to estimate mortality rates. See \code{\link{metaM}} for empirical methods to estimate natural mortality. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Maceina, M.J., and P.W. Bettoli. 1998. Variation in Largemouth Bass recruitment in four mainstream impoundments on the Tennessee River. North American Journal of Fisheries Management 18:998-1003. 
#' @@ -314,4 +314,4 @@ iCheck_ages2use <- function(ages2use,ages) { } ## return the ROWS (not the ages) to use rows2use -} \ No newline at end of file +} diff --git a/R/chapmanRobson.R b/R/chapmanRobson.R index fb9366aa..c5097e4f 100644 --- a/R/chapmanRobson.R +++ b/R/chapmanRobson.R @@ -42,13 +42,13 @@ #' #' Tested the results against the results from \code{agesurv} in \pkg{fishmethods} using the \code{rockbass} data.frame in \pkg{fishmethods}. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith et al. (2012)) results. \pkg{FSA} uses equation 2 from Smith et al. (2012) whereas \pkg{fishmethods} appears to use equation 5 from the same source to estimate the SE of Z. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 11-Mortality. #' #' @seealso See \code{\link[fishmethods]{agesurv}} in \pkg{fishmethods} for similar functionality. See \code{\link{catchCurve}} and \code{\link[fishmethods]{agesurvcl}} in \pkg{fishmethods} for alternative methods. See \code{\link{metaM}} for empirical methods to estimate natural mortality. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Chapman, D.G. and D.S. Robson. 1960. The analysis of a catch curve. Biometrics. 16:354-368. 
#' @@ -284,4 +284,4 @@ plot.chapmanRobson <- function(x,pos.est="topright",cex.est=0.95,round.est=c(3,1 ############################################################## # INTERNAL FUNCTIONS ############################################################## -# Note that iCheck_age2use() is in catchCurve() \ No newline at end of file +# Note that iCheck_age2use() is in catchCurve() diff --git a/R/depletion.R b/R/depletion.R index 3f7293d5..4bbcdd7c 100644 --- a/R/depletion.R +++ b/R/depletion.R @@ -48,13 +48,13 @@ #' #' The Leslie and DeLury methods match the results of Ricker (1975) for No and Q but not for the CI of No (Ricker used a very different method to compute CIs). #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @section IFAR Chapter: 10-Abundance from Depletion Data. #' #' @seealso See \code{\link{removal}} for related functionality and \code{\link[fishmethods]{deplet}} in \pkg{fishmethods} for similar functionality. #' -#' @references Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +#' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. #' #' Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.] 
#' @@ -289,4 +289,4 @@ plot.depletion <- function(x,xlab=NULL,ylab=NULL, "\nq=",round(x$est["q","Estimate"],4)), cex=cex.est,bty="n") } -} # nocov end \ No newline at end of file +} # nocov end diff --git a/R/dunnTest.R b/R/dunnTest.R index 773fee23..c771418b 100644 --- a/R/dunnTest.R +++ b/R/dunnTest.R @@ -41,7 +41,7 @@ #' #' @seealso See \code{kruskal.test}, \code{\link[dunn.test]{dunn.test}} in \pkg{dunn.test}, \code{posthoc.kruskal.nemenyi.test} in \pkg{PMCMR}, \code{kruskalmc} in \pkg{pgirmess}, and \code{kruskal} in \pkg{agricolae}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com}, but this is largely a wrapper (see details) for \code{\link[dunn.test]{dunn.test}} in \pkg{dunn.test} written by Alexis Dinno. +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com}, but this is largely a wrapper (see details) for \code{\link[dunn.test]{dunn.test}} in \pkg{dunn.test} written by Alexis Dinno. #' #' @references #' Dunn, O.J. 1964. Multiple comparisons using rank sums. Technometrics 6:241-252. @@ -164,4 +164,4 @@ print.dunnTest <- function(x,dunn.test.results=FALSE,...) { # nocov start ## Prints the result as if it came from dunn.test() from dunn.test package cat(paste(x$dtres,"\n")) } -} # nocov end \ No newline at end of file +} # nocov end diff --git a/R/expandCounts.R b/R/expandCounts.R index 6e2161b9..4689033d 100644 --- a/R/expandCounts.R +++ b/R/expandCounts.R @@ -20,7 +20,7 @@ #' #' @return A data.frame of the same structure as \code{data} except that the variable in \code{cform} may be deleted and the variable in \code{new.name} may be added. The returned data.frame will have more rows than \code{data} because of the potential addition of new individuals expanded from the counts in \code{cform}. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. 
Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{\link{expandLenFreq}} for expanding length frequencies where individual fish measurements were made on individual fish in a subsample and the remaining fish were simply counted. #' diff --git a/R/expandLenFreq.R b/R/expandLenFreq.R index d8a990fe..011edec3 100644 --- a/R/expandLenFreq.R +++ b/R/expandLenFreq.R @@ -19,7 +19,7 @@ #' #' @return Returns a vector that consists of measurements for the non-measured individuals in the entire sample. #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @seealso See \code{\link{expandCounts}} for expanding more than just lengths or expanding lengths when there is a known number in each length bin. See \code{\link{lencat}} for creating length bins. #' diff --git a/R/extraTests.R b/R/extraTests.R index 6436b494..4cfac6fe 100644 --- a/R/extraTests.R +++ b/R/extraTests.R @@ -27,7 +27,7 @@ #' \item \code{Pr(>F)}, \code{Pr(>Chisq)} The corresponding p-value. #' } #' -#' @author Derek H. Ogle, \email{derek@@derekogle.com} +#' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' #' @keywords htest #' @@ -228,4 +228,4 @@ iChkComplexModel <- function(sim,com) { simDF <- unlist(lapply(sim,stats::df.residual)) comDF <- stats::df.residual(com) if (!all(comDF +## FSA (Fisheries Stock Assessment) -The **FSA** package provides R functions to conduct typical introductory fisheries analyses. Example analyses that use **FSA** can be found in the [Introductory Fisheries Analyses with R book](http://derekogle.com/IFAR/) (*see note below*) and on [the *Examples* page](http://derekogle.com/fishR/examples/) of the [fishR website](http://derekogle.com/fishR/). You can browse documentation for functions in **FSA** under the *References* tab and recent changes under the *News* tab at [this page](https://fishr-core-team.github.io/FSA/). 
Please [cite **FSA**](https://fishr-core-team.github.io/FSA//authors.html) if you use **FSA** in a publication. +The **FSA** package provides R functions to conduct typical introductory fisheries analyses. Example analyses that use **FSA** can be found in the [Introductory Fisheries Analyses with R book](https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r) (*see note below*) and on the [fishR website](https://fishr-core-team.github.io/fishR/). Please [cite **FSA**](https://fishr-core-team.github.io/FSA//authors.html) if you use **FSA** in a publication. + +  ### Installation -The [most recent stable version (on CRAN)](https://cloud.r-project.org/package=FSA) of **FSA** may be installed with +The [most recent stable version](https://cloud.r-project.org/package=FSA) from CRAN may be installed with ```r install.packages("FSA") ``` -The most recent development version may be installed from GitHub with +The [development version](https://github.com/fishR-Core-Team/FSA) may be installed from GitHub with ```r if (!require('remotes')) install.packages('remotes'); require('remotes') remotes::install_github('fishR-Core-Team/FSA') ``` -You may need to have R Tools installed on your system to install the development version from GitHub. See the instructions for ([R Tools for Windows](https://cran.r-project.org/bin/windows/Rtools/) or [R Tools for Mac OS X](https://cran.r-project.org/bin/macosx/tools/)). +You may need R Tools installed on your system to install the development version from GitHub. See the instructions for ([R Tools for Windows](https://cran.r-project.org/bin/windows/Rtools/) or [R Tools for Mac OS X](https://cran.r-project.org/bin/macosx/tools/)). +  ### Questions / Comments / Problems or Contributions Report questions, comments, or bug reports on the [issues page](https://github.com/fishR-Core-Team/FSA/issues). @@ -36,6 +31,19 @@ We are always looking for others to contribute to **FSA**. 
Please feel free to m Please adhere to the [Code of Conduct](https://fishr-core-team.github.io/FSA/CODE_OF_CONDUCT.html). +  ### Note about **FSA** and *Introduction to Fisheries Analysis with R* book Versions of **FSA** beginning with v0.9.0 may no longer work as shown in the IFAR book. Many functions have not changed from when the book was published, but some have. Thus, you will need to install an **FSA** version before v0.9.0 to be assured that functions work as described in the IFAR book. + + +  + +[![Project Status: Active - The project has reached a stable, usable state and is being actively developed.](http://www.repostatus.org/badges/latest/active.svg)](http://www.repostatus.org/#active) +[![DOI](https://zenodo.org/badge/18348400.svg)](https://zenodo.org/badge/latestdoi/18348400) +[![CRAN Version](http://www.r-pkg.org/badges/version/FSA)](http://www.r-pkg.org/pkg/FSA) +[![License](http://img.shields.io/badge/license-GPL%20%28%3E=%202%29-brightgreen.svg?style=flat)](http://www.gnu.org/licenses/gpl-2.0.html) +[![R-CMD-check](https://github.com/fishR-Core-Team/FSA/workflows/R-CMD-check/badge.svg)](https://github.com/fishR-Core-Team/FSA/actions) +[![Codecov test coverage](https://codecov.io/gh/fishR-Core-Team/FSA/branch/master/graph/badge.svg)](https://codecov.io/gh/fishR-Core-Team/FSA?branch=master) +[![CRAN RStudio mirror downloads rate](http://cranlogs.r-pkg.org/badges/FSA) +![CRAN RSTudio mirror downloads total](http://cranlogs.r-pkg.org/badges/grand-total/FSA)](http://www.r-pkg.org/pkg/FSA) diff --git a/_pkgdown.yml b/_pkgdown.yml index e1750e88..aa36bcd0 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -10,8 +10,8 @@ authors: Alexis Dinno: template: - params: - bootswatch: spacelab + bootstrap: 5 + bootswatch: flatly development: mode: release @@ -148,14 +148,13 @@ reference: - FSA-defunct navbar: - type: inverse left: - text: Reference href: reference/index.html - - text: News - href: news/index.html right: - - icon: fa-home fa-2x + - icon: fa-newspaper 
+ href: news/index.html + - icon: fa-home fa href: index.html - - icon: fa-github fa-2x + - icon: fa-github fa href: https://github.com/fishR-Core-Team/FSA diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html index 3be96918..13548f0f 100644 --- a/docs/LICENSE-text.html +++ b/docs/LICENSE-text.html @@ -1,55 +1,56 @@ -License • FSALicense • FSA + + Skip to contents -
-
+
+
+
-
-
GNU GENERAL PUBLIC LICENSE
@@ -393,29 +394,21 @@ 

License

Public License instead of this License.
-
- - - -
+
- -
+
- diff --git a/docs/authors.html b/docs/authors.html index 4c4754b4..1635200b 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -1,58 +1,59 @@ -Authors and Citation • FSAAuthors and Citation • FSA + + Skip to contents -
-
-
-
- + +
+
+
+
+
+

Authors

  • Derek Ogle. Author, maintainer. @@ -71,42 +72,36 @@

    Authors


    Provided base functionality of dunnTest()

-
-
-

Citation

- Source: inst/CITATION -
-
+
+

Citation

+

Source: inst/CITATION

-

Ogle, D.H., J.C. Doll, P. Wheeler, and A. Dinno. 2022. FSA: Fisheries Stock Analysis. R package version 0.9.3.9000, https://fishr-core-team.github.io/FSA/.

-
@Manual{,
+      

Ogle, D.H., J.C. Doll, P. Wheeler, and A. Dinno. 2022. FSA: Fisheries Stock Analysis. R package version 0.9.3.9000, https://fishr-core-team.github.io/FSA/.

+
@Manual{,
   title = {FSA: Fisheries Stock Analysis},
   author = {Derek H. Ogle and Jason C. Doll and Powell Wheeler and Alexis Dinno},
   year = {2022},
   note = {R package version 0.9.3.9000},
   url = {https://fishr-core-team.github.io/FSA/},
 }
- -
- -
- +
+
-
+
- diff --git a/docs/index.html b/docs/index.html index a00ebbde..01ba9bcf 100644 --- a/docs/index.html +++ b/docs/index.html @@ -4,7 +4,8 @@ - + + Simple Fisheries Stock Assessment Methods • FSA @@ -12,12 +13,11 @@ - - - + + + - - + - + + Skip to contents -
-
-
- + +
+
+
+

 

-

FSA (Fisheries Stock Assessment) +

FSA (Fisheries Stock Assessment)

-

The FSA package provides R functions to conduct typical introductory fisheries analyses. Example analyses that use FSA can be found in the Introductory Fisheries Analyses with R book (see note below) and on the Examples page of the fishR website. You can browse documentation for functions in FSA under the References tab and recent changes under the News tab at this page. Please cite FSA if you use FSA in a publication.

+

The FSA package provides R functions to conduct typical introductory fisheries analyses. Example analyses that use FSA can be found in the Introductory Fisheries Analyses with R book (see note below) and on the fishR website. Please cite FSA if you use FSA in a publication.

+

 

Installation

-

The most recent stable version (on CRAN) of FSA may be installed with

+

The most recent stable version from CRAN may be installed with

-

The most recent development version may be installed from GitHub with

+install.packages("FSA")
+

The development version may be installed from GitHub with

-if (!require('remotes')) install.packages('remotes'); require('remotes')
-remotes::install_github('fishR-Core-Team/FSA')
-

You may need to have R Tools installed on your system to install the development version from GitHub. See the instructions for (R Tools for Windows or R Tools for Mac OS X).

+if (!require('remotes')) install.packages('remotes'); require('remotes') +remotes::install_github('fishR-Core-Team/FSA')
+

You may need R Tools installed on your system to install the development version from GitHub. See the instructions for (R Tools for Windows or R Tools for Mac OS X).

+

 

Questions / Comments / Problems or Contributions @@ -102,18 +101,18 @@

Questions / Comments / P

Report questions, comments, or bug reports on the issues page.

We are always looking for others to contribute to FSA. Please feel free to make a pull request via GitHub or to contact the maintainers.

Please adhere to the Code of Conduct.

+

 

Note about FSA and Introduction to Fisheries Analysis with R book

Versions of FSA beginning with v0.9.0 may no longer work as shown in the IFAR book. Many functions have not changed from when the book was published, but some have. Thus, you will need to install an FSA version before v0.9.0 to be assured that functions work as described in the IFAR book.

+

 

+

Project Status: Active - The project has reached a stable, usable state and is being actively developed. DOI CRAN Version License R-CMD-check Codecov test coverage CRAN RStudio mirror downloads rateCRAN RSTudio mirror downloads total

-
- - -
- diff --git a/docs/news/index.html b/docs/news/index.html index 6ca0fb21..d9b32907 100644 --- a/docs/news/index.html +++ b/docs/news/index.html @@ -1,73 +1,87 @@ -Changelog • FSAChangelog • FSA + + Skip to contents -
-
-
- +
+
+
- +

FSA 0.9.3.9000

+
  • Changes related to moving to fishR-Core-Team
    • Updated sticker.
    • +
    • Changed DHO e-mail address (in DESCRIPTION and in all author fields of the documentation). Partially address #86.
    • Updated pkgdown.yaml GitHub action to v2. Changed action to only run on a release (rather than a push) but it can be run manually as well.
    • Updated R-CMD-check.yaml GitHub action to v2. Note that I had to add the extra code for dealing with graphics on the Mac version.
    • +
  • +
  • Changes related to new fishR webpage +
    • Updated links in fishR(), FSA(), and README.md. Partially address #86.
    • +
    • Updated all links to Introductory Fisheries Analyses with R book.
    • +
    • Added links to CSV files for all data sets. This addresses #96.
    • +
    • Changed theme in _pkgdown.yml to match that of FSAdata and more closely match fishR.
    • +
    • Removed most recent dates from NEWS file as pkgdown picks up the CRAN release date to add.
    • +
  • +
  • +alkIndivAge(): Modified. Added a catch for NAs in the length sample. Also added a test. This addresses #88.
  • +
  • +confint.boot(): Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by grep()ing for the % sign. This fixes an issue related to car::Confint() returning the coef() results for functions that have a coef() method but not for those that do not. Also updated tests to use results from car::Boot() rather than the old car::bootCase().
- +

FSA 0.9.3

CRAN release: 2022-02-18

  • Moved dplyr from imports to suggests (needed because functions were removed in last version; however it is still used in some examples; partially addresses #87).
  • Removed sciplot from imports (because functions were removed in last version; partially addresses #87).
  • Updated tests for ksTest() to handle issues on the CRAN M1 build machine (per e-mail from Prof. Ripley on 15-Feb-22; partially addresses #87).
  • Updated all links to the droglenc github that were related to FSA or FSAdata to be to the fishR-Core-Team github.
- +

FSA 0.9.2 12-Feb-21

CRAN release: 2022-02-12

  • Last version maintained by Derek Ogle. Transferring to fishR Core Team for next version.
  • filterD(): REMOVED (to FSAmisc).
  • @@ -91,11 +105,11 @@
- +

FSA 0.9.1

CRAN release: 2021-07-17

  • Corrected testing issue for catchCurve() and chapmanRobson() as directed by CRAN on 17-Jul-21. Issue likely caused by changes to fishmethods package.
- +

FSA 0.9.0

CRAN release: 2021-06-09

  • Make note of the several removed (now defunct) and deprecated (soon to be defunct) functions listed below.
  • Added Jason Doll as an AUThor.
  • Moved plyr from Imports to Suggests.
  • @@ -175,7 +189,7 @@
- +

FSA 0.8.32

CRAN release: 2021-01-15

  • Removed Travis-CI and appveyor.
  • No longer using coveralls for coverage statistics. Changed to codecov.io.
  • Added GitHub Action for CI/CD (used usethis::use_github_action_check_standard()).
  • @@ -197,7 +211,7 @@
- +

FSA 0.8.31

CRAN release: 2020-11-08

  • Now using roxygen v7.1.1.
  • Added tibble to suggests (see comment about headtail() below).
  • Cleaned up the documentation of parameters for RichardsFuns() (documentation did not change when parameter letters were changed for the Age and Growth book).
  • @@ -206,8 +220,10 @@
- -
  • Started using rhub::check_for_cran() for checking before sending to CRAN.
  • +

    FSA 0.8.30

    CRAN release: 2020-03-09

    +
    • +Date: 9-Mar-20
    • +
    • Started using rhub::check_for_cran() for checking before sending to CRAN.
    • Updated tests for Summarize() and ksTest() that used data.frame(). This should have been done with v0.8.28.
    • Fixed errors for tests in ksTest() that were identified using R-hub.
    • Removed all links to documentation in non-dependent or non-suggested packages. This removes a note from R-hub.
    • @@ -215,16 +231,20 @@
- -
  • Removed dependency on epitools package as it may soon be orphaned. See changes to binCI() and poiCI() outlined below.
  • +

    FSA 0.8.29

    +
    • +Date: 8-Mar-20
    • +
    • Removed dependency on epitools package as it may soon be orphaned. See changes to binCI() and poiCI() outlined below.
    • binCI(): Modified. Added internal functions that are based on (but not identical to) functions in the epitools package which will possibly be deprecated soon (per note from CRAN on 7-Mar-20).
    • poiCI(): Modified. Added internal functions that are based on (but not identical to) functions in the epitools package which will possibly be deprecated soon (per note from CRAN on 7-Mar-20).
- +

FSA 0.8.28

  • +Date: 28-Feb-20
  • +
  • fitPlot(): Modified. Changed so that lines are plotted after the points in the IVR versions.
  • ksTest(): Modified. Changed documentation examples to handle R’s new way of handling stringsAsFactors= (per request from CRAN on 27-Feb-20).
  • @@ -232,7 +252,7 @@
- +

FSA 0.8.27

CRAN release: 2020-02-03

  • Now using ROxygen2 7.0.2.
  • Removed dependency on gplots package as it is now orphaned. Required adding iRichColors() internal function.
  • @@ -241,7 +261,7 @@
- +

FSA 0.8.26

CRAN release: 2019-11-22

  • Changed to depending on R >=3.5.0, because that is the latest version required by a package (i.e., car) that FSA imports or suggests. Used the “check_r_versions_of_package_dependencies” shiny app by “ateucher” (on Github) to help determine this.
  • Removed asbio package from suggests as it hung up Travis-CI build (because of the need for the TCLTK package).
  • @@ -254,7 +274,7 @@
- +

FSA 0.8.25 24-Jul-19

CRAN release: 2019-07-24

  • agePrecision(): Modified. Changed so that PE2 and CV2 use the median in the entire calculation rather than just in the denominator.
  • @@ -265,11 +285,11 @@
- +

FSA 0.8.24 17-May-19

CRAN release: 2019-05-21

  • Corrected misuses of \concept in Rd files per CRAN request.
- +

FSA 0.8.23 1-May-19

CRAN release: 2019-05-02

  • Reorganized testthat folder as suggested in testthat release notes.
  • Removed all uses of Subset() (replaced with filterD()).
  • @@ -284,12 +304,12 @@
- +

FSA 0.8.22

CRAN release: 2018-11-22

  • Corrected CITATION file.
  • Updated tests for changes in the fishmethods package (vblrt() replaced with growthlrt() and T= replaced with TC= in M.empirical()) per CRAN request.
- +

FSA 0.8.21

CRAN release: 2018-11-03

  • Added a webpage. Setup Travis-CI to handle updates (See this).
  • Added a hex sticker logo.
  • Added withr to Imports (see usages below).
  • @@ -337,7 +357,7 @@
- +

FSA 0.8.20

CRAN release: 2018-05-18

  • Added asbio, DescTools, nlme, and psych packages to Suggests because they are used in tests (and as will soon be required by CRAN … per an e-mail from CRAN on 17-May-18).
  • Fixed a bunch of bad links to other packages in the documentation.
  • Removed the “Date” field from the Description file.
  • @@ -353,7 +373,7 @@
- +

FSA 0.8.19

CRAN release: 2018-04-08

  • addZeroCatch(): Modified. Changed two 1:nrow() structures to seq_len(nrow()) (partially addressing #36).
  • @@ -406,8 +426,10 @@
- -
  • Changed to depending on R >=3.2.0, because that is the latest version required by a package (i.e., car) that FSA imports or suggests. Used the “check_r_versions_of_package_dependencies” shiny app by “ateucher” (on Github) to help determine this.
  • +

    FSA 0.8.18

    +
    • +Date: 31-Mar-18
    • +
    • Changed to depending on R >=3.2.0, because that is the latest version required by a package (i.e., car) that FSA imports or suggests. Used the “check_r_versions_of_package_dependencies” shiny app by “ateucher” (on Github) to help determine this.
    • Using latest testthat package.
    • bootCase(): Added. This was added because bootCase() will soon be removed from the car package. It was added so that the code in the Introductory Fisheries Analyses with R book will still work. It is largely a wrapper to Boot() in car with method="case". The documentation was updated somewhat.
    • @@ -435,21 +457,23 @@
- +

FSA 0.8.17

CRAN release: 2017-10-29

  • dunnTest(): Modified. Adjusted code to handle the addition of altp= to and modified output from dunn.test() in dunn.test. Added additional tests and corrected some issues in the documentation.
  • GompertzFuns(): Modified. Fixed error in message (i.e., msg=TRUE) for param="Ricker2".
- +

FSA 0.8.16

CRAN release: 2017-09-07

  • Need to resubmit v0.8.15 to CRAN, so bumped the version.
  • growthFunShow(): Modified. Fixed error in expression for type="Logistic" and param="CampanaJones1".
- -
  • Added a script to the helpers directory that will test that all required packages are installed.
  • +

    FSA 0.8.15

    +
    • +Date: 6-Sep-17
    • +
    • Added a script to the helpers directory that will test that all required packages are installed.
    • iAddOutlierTestResults(): Modified. Fixed bug related to point labels in residPlot() when the data.frame for the original model had NA values.
    • @@ -458,7 +482,7 @@
- +

FSA 0.8.14

CRAN release: 2017-07-27

  • Moved dunn.test and lmtest to imports to help with portability for workshops.
  • ageBias(): Modified. Fixed bug in plot() so that the tick marks on the marginal histograms match the tick marks on the main plot. Changed the default hist.panel.size= in plot() so that it more reliably prints the values on the axes of the marginal histograms.
  • @@ -468,7 +492,7 @@
- +

FSA 0.8.13

CRAN release: 2017-04-29

  • ageBias(): Modified. A complete rebuild of plot. Major changes are to add plotAB() which is primarily used to make the “legacy” age bias plots of Campana, removal of the “sunflower” plot option, new sets of defaults for many of the arguments that reflect my preferences for visualizing age comparisons (which includes defaulting to plotting differences in ages), addition of the ability to add marginal histograms (xHist=, yHist=, col.hist=, and hist.panel.size=), better handling of axis ticks and labels (primarily to show ticks at integers and make sure 0 is included for differences), and allowing the ability to add “summary layers” to the main plot (see allowAdd=). Many examples were added. Some functionality from previous versions will be broken.
  • @@ -509,7 +533,7 @@
- +

FSA 0.8.12

CRAN release: 2017-03-12

  • Lots of spelling corrections after running devtools::spell_check().
  • Cleaned up some issues in the testing files that were caused by a new version of fishmethods and changes to R v3.4.0.
  • @@ -522,7 +546,7 @@
- +

FSA 0.8.11

CRAN release: 2016-12-13

  • Changed all stop()s to STOP()s and all warning()s to WARN(). This modified nearly all functions.
  • Changed all paste()s that used sep="" to paste0()s.
  • Removed several sep=""s from message()s.
  • @@ -602,7 +626,7 @@
- +

FSA 0.8.10

CRAN release: 2016-09-24

  • alkIndivAge(): Modified. Added na.rm=TRUE to the checks on the minimum and maximum length data.
  • @@ -633,7 +657,7 @@
- +

FSA 0.8.9

CRAN release: 2016-08-23

  • ageComparison(): Modified. Removed an internal call to fact2num() because of changes to Summarize() below. Should not impact user experience.
  • @@ -646,7 +670,7 @@
- +

FSA 0.8.8

CRAN release: 2016-07-18

  • growthFunShow(): Modified. Added Pauly et al. (1992) seasonal cessation function. Added case= for use with Schnute model.
  • @@ -655,7 +679,7 @@
- +

FSA 0.8.7

CRAN release: 2016-05-08

  • Compiled under R v3.3.0.
  • Removed relax from Suggests. See srStarts() and vbStarts() notes below. This addresses #17.
  • Removed gdata from Imports. See filterD() and Subset() notes below. This addresses #5.
  • @@ -708,7 +732,7 @@
- +

FSA 0.8.6

CRAN release: 2016-03-25

  • Fixed problems with tests, and made the tests more explicit, related to PSD and Wr functions. Suppressed some warnings related to sumTable() in ALK related tests and Summarize() in age comparisons tests. Prompted by forthcoming changes to testthat.
  • Removed News.md from .Rbuildignore (apparently now supported by CRAN).
  • @@ -735,7 +759,7 @@
- +

FSA 0.8.5

CRAN release: 2016-02-14

  • Added URL for fishR webpage in DESCRIPTION per CRAN request. Removed it from the URL field in DESCRIPTION.

  • Updated all references to Ogle (2016) in documentation.

  • ageBias(): Modified. Minor corrections to the documentation.

  • @@ -747,7 +771,7 @@
- +

FSA 0.8.4

CRAN release: 2015-12-21

  • Now using Roxygen2 v5.0.1.
  • Removed some requireNamespaces() from some functions and moved those packages from Suggests to Imports so that those functions would work better with other packages. The only requireNamespaces() that remain are related to functions that require the relax package (so tcltk is not installed until needed) and knitr, dunn.test, and lmtest as these are unlikely to be used by other packages and will keep the packages that are loaded with FSA to a minimum. Packages moved from Suggests to Depends are Hmisc (for use in binCI), gdata (for use in filterD() and Subset()), dplyr (for use in filterD()), sciplot (for use in fitPlot()), car (for use in residPlot()), and gplots (for use with colors).
  • @@ -762,12 +786,14 @@
- +

FSA 0.8.3

CRAN release: 2015-10-23

  • Removed vignetteBuilder from DESCRIPTION (remnant from a vignette I built and then removed) at request of CRAN.
- -
  • Converted all files in data-raw to CSV files.
  • +

    FSA 0.8.2

    +
    • +Date: 22-Oct-15
    • +
    • Converted all files in data-raw to CSV files.
    • Removed all \href{}{} and \url{} codes to websites that I don’t control. The addresses are now “naked” such that the user will need to copy-and-paste them into a browser to view the web page rather than clicking on a hyper link. Hopefully this will eliminate problems with R CMD CHECK.
    • ChinookArg: Updated help documentation.
    • @@ -781,9 +807,8 @@
- -
- -
  • -Submitted to CRAN (had an error in the tests for Sparc-Solaris).
  • -
  • Added suggests for dunn.test for use in dunnTest() (see below).
  • +

    FSA 0.8.0

    CRAN release: 2015-10-08

    +
- -
  • Converted all .txt files to .Rda files. Original .txt files are in the data-raw directory which was added to .Rbuildignore.
  • +

    FSA 0.7.11

    +
    • +Date: Oct15
    • +
    • Converted all .txt files to .Rda files. Original .txt files are in the data-raw directory which was added to .Rbuildignore.
- +

FSA 0.7.10

  • +Date: Oct15
  • +
  • purl2(): Added newname= to allow the output file to have a name other than the same as the input file.
  • reproInfo(): Added markdown to the out= types.
- -
  • Updated README.md and DESCRIPTION for new websites.
  • +

    FSA 0.7.9

    +
    • +Date: Sep15
    • +
    • Updated README.md and DESCRIPTION for new websites.
    • Changed all references to the WordPress site to the new website. Removed links to specific IFAR chapters. Changed my e-mail address. Created link in references to IFAR book page.
    • fishR(): Modified. Updated for the new websites.
- +

FSA 0.7.8

  • +Date: Sep15
  • +
  • ageComparison(): Modified. Changed what="McNemars" and what="Bowkers" to what="McNemar" and what="Bowker". Fixed bug if all ages are NA.
  • catchCurve(): Modified. Fixed bug related to NA values in the catch vector.
  • @@ -902,8 +933,10 @@
- +

FSA 0.7.7

  • +Date: Aug15
  • +
  • ageBias(): Modified. Changed default for pch.mean= to 95 (from 175). If what= has only one item, then results will now be invisibly returned so that results can be saved to an object.
  • agePrecision(): Modified. Added trunc.diff=. If what= has only one item, then results will now be invisibly returned so that results can be saved to an object.
  • @@ -915,15 +948,19 @@
- +

FSA 0.7.6

  • +Date: Aug15
  • +
  • Summarize(): Modified. Converted to using iHndlFormula(). Changed output for quantitative data (validn is always returned, NAs is never returned). Changed output for two-way factor data (not returned as a character from formatC()). Removed ... from code in several places as it was buggy and not used. Added more checks and modified check messages. Fixed bug from when a 1-d matrix of characters was sent. Added tests.
  • sumTable(): Modified. Converted to using iHndlFormula(). Added tests.
- +

FSA 0.7.5

  • +Date: Aug15
  • +
  • addRadCap(): Modified. Streamlined code. Changed default in.pre= to NULL (from inc). Added some tests for returned data.
  • BluegillLM: Removed. Moved to FSAdata.
  • @@ -933,8 +970,10 @@
- +

FSA 0.7.4

  • +Date: Aug15
  • +
  • binCI(): Modified. Check for Hmisc with requireNamespaces() before processing body of function. This allowed moving Hmisc into Suggests declarations rather than Imports.
  • chooseColors(): Modified. Check for gplots with requireNamespaces() before processing body of function. This allowed moving gplots into Suggests declarations rather than Imports.
  • @@ -959,14 +998,18 @@
- -
  • Removed all importFrom() directives and went to hard-wiring to packages with ::. Added imports() directives for stats, graphics, tools, and grDevices. Removed imports() directive for multcomp().
  • +

    FSA 0.7.3

    +
    • +Date: Aug15
    • +
    • Removed all importFrom() directives and went to hard-wiring to packages with ::. Added imports() directives for stats, graphics, tools, and grDevices. Removed imports() directive for multcomp().
    • vbStarts(): Modified. Changed default method for methEV=. Changed order of starting values for type="Mooij" in order to match that from vbFuns(). This also fixed a bug when dynamicPlot=TRUE was used with type="Mooij". Added tests to determine if parameter order is the same between vbStarts() and vbFuns() for all parameterizations.
- +

FSA 0.7.2

  • +Date: Jul15
  • +
  • ageBias(): Modified. Corrected bug with labeling of x-axis on age-bias plot when ref.lab= and nref.lab= were not given by the user. Changed default for nYpos= from 1.1 to 1.03. Added cex.n= to allow control of the size of the sample size labels.
  • agePrecision(): Modified. Changed what="detail" to what="details". Note that what="detail" still works.
  • @@ -980,8 +1023,10 @@
- +

FSA 0.7.1

  • +Date: Jul15
  • +
  • ageBias(): Modified. Moved into a single file with agePrecision(). Cleaned-up help file. No change in behavior.
  • agePrecision(): Modified. Moved into a single file with ageBias(). Cleaned-up help file. No change in behavior.
  • @@ -1035,8 +1080,10 @@
- -
  • Fixed description to be in title case.
  • +

    FSA 0.7.0

    +
    • +Date: Jul15
    • +
    • Fixed description to be in title case.
    • Fixed reference to fishR page in description file.
    • Fixed several URL references, deleted others that have changed and are no longer available.
    • Updated CITATION file (to remove CRAN note).
    • @@ -1046,8 +1093,10 @@
- +

FSA 0.6.25

  • +Date: Jul15
  • +
  • alkPrep(): Deleted. Moved to FSAmisc package (on GitHub).
  • changesPos(): Deleted. Moved to FSAmisc package (on GitHub).
  • @@ -1079,8 +1128,10 @@
- +

FSA 0.6.24

  • +Date: Jun15
  • +
  • alkIndivAge(): Modified. Switched to using iHndlFormula() at the beginning. Added more checks and tests.
  • expandLenFreq(): Modified. Added more checks. Added some tests.
  • @@ -1088,8 +1139,10 @@
- +

FSA 0.6.23

  • +Date: Jun15
  • +
  • ageBias(): Modified. Fixed bugs related to axes on numbers plot and sunflower plot.
  • filterD(): Modified. Added reorder=FALSE to drop.levels() so that the order of levels is not changed when levels are dropped.
  • @@ -1097,8 +1150,10 @@
- +

FSA 0.6.22

  • +Date: Jun15
  • +
  • extraSS(): Modified. Added sim.name= to allow for a common typing mistake.
  • logbtcf(): Modified. Slight change to handle a check of lm class.
  • @@ -1112,8 +1167,10 @@
- +

FSA 0.6.21

  • +Date: Jun15
  • +
  • addRadCap(): Modified. Modified so that in.pre= string must be at the start of the variable names. Added a check for when the in.pre= string does not exist at the start of any variable names. Added a check for whether all in.var= variables exist. Added some simple tests (need more).
  • bcFuns(): Modified. Removed type=; BCM= can now be either numeric or a string. Allowed string to be in any case (will be converted to the required all upper-case). Corrected some errors for when msg=TRUE. Added some simple tests.
  • @@ -1123,8 +1180,10 @@
- +

FSA 0.6.20

  • +Date: Jun15
  • +
  • gompFuns(): Deleted.
  • gompModels(): Deleted.
  • @@ -1152,15 +1211,19 @@
- +

FSA 0.6.19

- -
  • Changed nearly all “messages” using cat() to using message() so that they can be suppressed with suppressMessage() or message=FALSE in knitr. See “One comment on messages” at http://yihui.name/knitr/demo/output/. Specific functions modified are listed below.
  • +

    FSA 0.6.18

    +
    • +Date: Jun15
    • +
    • Changed nearly all “messages” using cat() to using message() so that they can be suppressed with suppressMessage() or message=FALSE in knitr. See “One comment on messages” at http://yihui.name/knitr/demo/output/. Specific functions modified are listed below.
    • ageBias(): Modified. Changed all cat() to message()s. Changed so that messages (result headers) are only printed if what= contains more than one item.
    • @@ -1193,8 +1256,10 @@
- +

FSA 0.6.17

  • +Date: Jun15
  • +
  • extraSS(): Modified. Added more message tests and some calculational tests (compared to anova() results).
  • gompFuns(): Modified. Changed all cat()s to message()s and slightly modified the messages. Fixed minor bugs in some created functions. Created some tests.
  • @@ -1206,8 +1271,10 @@
- +

FSA 0.6.16

  • +Date: Jun15
  • +
  • extraSS(): Modified. Added sim_names= and com_name= so that simple descriptive names could be given to the model and printed in the heading of the output. Added checks for whether the complex model appears more complex or not. Added tests for warning and error messages.
  • fishR(): Modified. Fixed bug with where="news". Added tests.
  • @@ -1219,8 +1286,10 @@
- +

FSA 0.6.15

  • +Date: Jun15
  • +
  • addZeroCatch(): Modified. Deleted extraneous print() statement.
  • lencat(): Modified. Major re-write to make it easier to trouble-shoot. Fixed bug related to empty category on end when as.fact=TRUE and use.names=TRUE. Added more tests.
  • @@ -1228,8 +1297,10 @@
- -
  • Added travis-ci integration.
  • +

    FSA 0.6.14

    +
    • +Date: May15
    • +
    • Added travis-ci integration.
    • Added coveralls integration.
    • Added importFrom for mapvalues() from plyr.
    • @@ -1268,16 +1339,20 @@
- -
  • Some miscellaneous reorganizations of files.
  • +

    FSA 0.6.13

    +
    • +Date: May15
    • +
    • Some miscellaneous reorganizations of files.
    • ageBias(): Modified. Corrected bugs with show.pts=TRUE and “sunflower plot” that came from changes made in version 0.5.1.
    • residPlot(): Modified. Deleted student=. Added resid.type= which allows used of standardized (internally studentized) and (externally) studentized residuals for linear models (along with raw residuals). Added code following nlsResiduals() from nlstools for standardized residuals for nonlinear models.
- +

FSA 0.6.12

  • +Date: May15
  • +
  • gompFuns(): Added.
  • gompModels(): Added.
  • @@ -1291,8 +1366,10 @@
- +

FSA 0.6.11

  • +Date: Apr15
  • +
  • kCounts(): Added. Was swvCounts().
  • kPvalue(): Added. Was swvPvalue().
  • @@ -1318,16 +1395,20 @@
- -
  • Compiling under R 3.2.0.
  • +

    FSA 0.6.10

    +
    • +Date: Apr15
    • +
    • Compiling under R 3.2.0.
    • Added some cross-reference links to help files.
    • Remove fishR vignette section and added IFAR Chapter section to help files.
    • fishR(): Modified. Added IFAR as an option. Updated code to be more simple.
- -
  • Last version for submission of first draft of Introductory Fisheries Analyses with R.
  • +

    FSA 0.6.5

    +
    • +Date: Apr15
    • +
    • Last version for submission of first draft of Introductory Fisheries Analyses with R.
    • capHistConvert(): Modified. Added a warning section and an example of problems that can occur if the data are in event format but the event variable contains unused levels as may occur following subsetting. Thanks to Joseph Feldhaus for pointing out this problem.
    • @@ -1338,20 +1419,26 @@
- -
  • Changed to using LazyData: true.
  • +

    FSA 0.6.4

    +
    • +Date: Apr15
    • +
    • Changed to using LazyData: true.
    • se(): Added. Removed importFrom of se() from sciplot.
- -
  • Some modifications to tests.
  • +

    FSA 0.6.3

    +
    • +Date: Apr15
    • +
    • Some modifications to tests.
    • plot.capHist(): Modified. Changed default plot look which can now be controlled with pch=, cex.pch=, and lwd=. Modified the two y-axis scales to use plotmath characters.
- +

FSA 0.6.2

  • +Date: Mar15
  • +
  • capHistConvert(): Modified. Streamlined code around creating var.lbls. Made event the default value for var.lbls.pre=. Added some checks to var.lbls.pre= if it starts with a number or has too many values. Added cols2use= and modified use of cols2ignore= via iHndlCols2use().
  • capHistSum(): Modified.
  • @@ -1359,8 +1446,10 @@
- +

FSA 0.6.1

  • +Date: Mar15
  • +
  • catchCurve(): Modified. Changed how ages2use= was handled so that negative values can be used to exclude some ages. Will also now send an error if a mix of positive and negative ages are sent in ages2use=. Better handled the situation where ages2use= had more ages than the age variable. Checked for non-positive weights if weighted=TRUE and returned a warning and changed the non-positive weights to the minimum of the positive weights.
  • chapmanRobson(): Modified. Changed how ages2use= was handled so that negative values can be used to exclude some ages. Will also now send an error if a mix of positive and negative ages are sent in ages2use=. Better handled the situation where ages2use= had more ages than the age variable.
  • @@ -1370,8 +1459,10 @@
- -
  • updated DESCRIPTION file (following this – http://r-pkgs.had.co.nz/description.html)

    FSA 0.6.0

    +
    • +Date: Mar15
    • +
    • updated DESCRIPTION file (following this – http://r-pkgs.had.co.nz/description.html)
    • srFuns(): Modified. Changed function returned when simplify=FALSE so that if the parameters are named that the name is dropped. Thus, when the function is used, the returned result will not be annoyingly named as the first parameter. Added functionality for the “density-independence” model.
    • @@ -1381,8 +1472,10 @@
- +

FSA 0.5.3

  • +Date: Mar15
  • +
  • growthModelSim(): Deleted. The simulation functionality was moved to the FSAsim package. The functionality related to finding starting values for the von Bertalanffy models was moved to vbStarts().
  • srFuns(): Modified. A complete rebuild to make similar to vbFuns(). Added simple=. Added type='Shepherd' for the Shepherd (1982) three parameter model and type='SailaLorda' for the “Saila-Lorda” three parameter model from Iles (1994). Added tests for error messages.
  • @@ -1396,25 +1489,33 @@
- +

FSA 0.5.2

  • +Date: Mar15
  • +
  • psdPlot(): Modified. Fixed bug related to NAs in max.brks variable.
- +

FSA 0.5.1

  • +Date: Mar15
  • +
  • ageBias(): Modified. Reversed the order of the formula … it is now nrefvar~refvar. This more closely matches other R functions where the tilde may be interpreted as the word “by”. In other words, the formula now reads as “nonreference variable by reference variable” (i.e., Y by X). Thanks to Richard McBride for the suggestion. Modified the age-bias plot extensively … added sfrac= and defaulted to 0 to remove ends of the confidence intervals, added cex.mean= to control the size of the symbol for the mean point, added lwd= that will control and set all of the lwd defaults to 1.
  • agePrecision(): Modified. Changed all “CV” results to “ACV”.
- +

FSA 0.4.51

  • +Date: Mar15
  • +
  • catchCurve(): Modified. Updated the help file regarding zmethod="Smithetal".
- +

FSA 0.4.50

  • +Date: Mar15
  • +
  • capFirst(): Modified. Added a check to make sure the inputted object was either a character or factor class. Added code to return the object as the same class of the original object.
  • lencat(): Modified. Added a catch for bad choices of arguments. Added a catch to send a warning if the vector contains all NA values (this happens when lencat() is used within a loop or as part of psdAdd()). Added tests for error and warning messages. Changed how the formula was handled in the formula method.
  • @@ -1424,30 +1525,40 @@
- +

FSA 0.4.49

  • +Date: Mar15
  • +
  • expandCounts(): Modified. Made message regarding rows with zero counts more useful. Added missing counts to the catch of zero counts. Made changes to handle more “odd” data entries (see “details” in the help file). Made some tests. Added some tests.
- +

FSA 0.4.48

  • +Date: Mar15
  • +
  • psdCalc(): Modified. Corrected “bug” with units=. Also modified warning message when no “stock” fish were present in the data.frame to further note what units= were used (i.e., this problem is likely to happen if the data is inches but the user uses the default units='mm'). Thanks to S. Mather for inspiring this fix.
- +

FSA 0.4.47

  • +Date: Feb15
  • +
  • dunnTest(): Modified. Corrected “bug” in the order that the groups are subtracted (i.e., they were flipped).
- +

FSA 0.4.46

  • +Date: Feb15
  • +
  • catchCurve(): Modified. Changed default for pos.est= to topright. Added cex.pos= (and set default to slightly smaller value).
  • chapmanRobson(): Modified. Changed default for pos.est= to topright. Added cex.pos= (and set default to slightly smaller value).
- +

FSA 0.4.45

  • +Date: Feb15
  • +
  • hist.formula(): Modified. Changed use of par() to eliminate modifications to the gridding of plots after the function is complete. Also removed the setting of mar= and mgp= in par().
  • mrOpen(): Modified. Removed pretty printing for summary() and confint() methods. These got in the way of being able to cbind() the results together for a succinct display.
  • @@ -1455,8 +1566,10 @@
- +

FSA 0.4.44

  • +Date: Feb15
  • +
  • .onAttach(): Modified. Centered the message and adjusted for different lengths of version numbers.
  • alkPlot(): Modified. Fixed bug when using add=TRUE with type="bubble".
  • @@ -1478,20 +1591,26 @@
- +

FSA 0.4.43

  • +Date: Feb15
  • +
  • mrOpen(): Modified. Changed summary() and confint() methods to allow single, multiple, or all choices of parameters to return results for. Also added code to print the results more prettily.
  • swvCode(): Modified. Fixed bug related to blanks='extra'.
- +

FSA 0.4.42

- +

FSA 0.4.41

  • +Date: Jan15
  • +
  • catchCurve(): Modified. Removed the use of larger points in the plot().
  • chapmanRobson(): Modified. Removed the use of larger points in the plot().
  • @@ -1501,27 +1620,35 @@
- +

FSA 0.4.40

  • +Date: Jan15
  • +
  • lencat(): Modified. Fixed a bug that occurred if breaks= were given but the vector contained NAs. Thanks to Ben Neely for pointing this out.
- +

FSA 0.4.39

  • +Date: Jan15
  • +
  • catchCurve(): Modified. Changed use.weights= to weighted=. Added some checks for the formula in the formula version and for the variables in the default version. Add unit tests for warnings and errors and two tests for values.
  • chapmanRobson(): Modified. Added the method="Smithetal" methodology for estimating the SE of Z (and made it the default). Added some checks for the formula in the formula version and for the variables in the default version. Added verbose= to summary(). Add unit tests for warnings and errors and two tests for values.
- +

FSA 0.4.38

  • +Date: Jan15
  • +
  • alkPlot(): Modified. Changed behavior for adding a legend to alleviate a bug.
  • metaM(): Added.
- +

FSA 0.4.37

  • +Date: Jan15
  • +
  • confint.nlsBoot(): Modified. Changed default for err.col= to black from red. Fixed example due to changes in nlsBoot package.
  • extraSS(): Modified. Added a catch to make sure all models are of the same type. Added a catch to note that the function does not work with other that lm() or nls() models. Fixed a bug related to the labels for results from anova() being different depending on whether lm() or nls() models were given. Added some examples.
  • @@ -1531,8 +1658,10 @@
- +

FSA 0.4.36

  • +Date: Jan15
  • +
  • hist.formula(): Modified. Added iaxs=, which when set to the default value of TRUE will use xaxs="i" and yaxs="i" to remove the “floating” x-axis produced by hist() in base R.
  • lwCompPreds(): Modified. Added the yaxs= argument.
  • @@ -1546,18 +1675,24 @@
- +

FSA 0.4.35

- +

FSA 0.4.34

  • +Date: Dec14
  • +
  • addZeroCatch(): Modified. Removed idvar=, forced the eventvar= and speciesvar= variables in the returned data.frame to be numeric if they were numeric in the original data.frame, allowed speciesvar= to have more than one variable, and added na.rm=. Multiple values for specvar= will allow the user to add zeros based on a combination of variables (e.g., species and size category). The na.rm= argument allows the user to remove “missing” species, which are common if some sampling events did not capture any fish.
- +

FSA 0.4.33

  • +Date: Dec14
  • +
  • growthModelSim(): Modified. Changed all “K0” objects to “t50”.
  • headtail(): Added.
  • @@ -1571,8 +1706,10 @@
- +

FSA 0.4.32

  • +Date: Nov14
  • +
  • expandCounts(): Added (from fishWiDNR package).
  • expandLenFreq(): Added. Same as lenFreqExpand() but thought that this name fits better with expandCounts().
  • @@ -1584,8 +1721,10 @@
- -
  • Removed the suggests for plyr.
  • +

    FSA 0.4.31

    +
    • +Date: Nov14
    • +
    • Removed the suggests for plyr.
    • addZeroCatch(): Modified. Slight modifications to help file. Fixed bug related to error checking the number of variables. Added some tests.
    • @@ -1602,8 +1741,10 @@
- -
  • Added a suggests for plyr, for examples using mapvalues().
  • +

    FSA 0.4.30

    +
    • +Date: Oct14
    • +
    • Added a suggests for plyr, for examples using mapvalues().
    • lencat(): Modified. Changed as.fact= to default to same as use.names=. This will result in the same behavior as before. However, it also allows the user to set use.names=TRUE and as.fact=FALSE to return a character vector (that is not a factor).
    • @@ -1614,8 +1755,10 @@
- -
  • Added a suggests for dplyr.
  • +

    FSA 0.4.29

    +
    • +Date: Oct14
    • +
    • Added a suggests for dplyr.
    • Added an external file in inst/extdata for testing PSD and Wr calculations.
    • capFirst(): Modified. Changed words= to which=.
    • @@ -1631,8 +1774,10 @@
- +

FSA 0.4.28

  • +Date: Sep14
  • +
  • psdAdd(): Added.
  • psdDataPrep(): Deleted. Functionality replaced by psdAdd().
  • @@ -1644,8 +1789,10 @@
- +

FSA 0.4.27

  • +Date: Sep14
  • +
  • hist.formula(): Modified. Slight modifications to warning messages.
  • Summarize(): Modified. Slight modification to warning messages.
  • @@ -1661,8 +1808,10 @@
- +

FSA 0.4.26

  • +Date: Sep14
  • +
  • capFirst(): Modified. Added an option to handle a vector of strings rather than just a single string.
  • lencat(): Modified. Fixed bug with category names when use.names=TRUE. Moved all internal functions outside of lencat() environment (and renamed them). Cleaned up code.
  • @@ -1684,8 +1833,10 @@
- +

FSA 0.4.25

  • +Date: Sep14
  • +
  • mrClosed(): Modified. Better handled a given value of R=.
  • psdCalc(): Modified. Fixed a bug that appeared when no “zero” fish were present in the data. Moved all internal functions outside of psdCalc() environment (and renamed them).
  • @@ -1699,8 +1850,10 @@
- +

FSA 0.4.24

  • +Date: Aug14
  • +
  • ageKey(): Deprecated. See alkIndAge().
  • ageKeyPlot(): Deprecated. See alkPlot().
  • @@ -1724,13 +1877,17 @@
- +

FSA 0.4.23

  • +Date: Aug14
  • +
  • removal(): Modified. Completely modified the code so that the examples with apply() and lapply() would also provide confidence intervals. Also changed the code to reflect that $\sum_{i=1}^{k-1}T_{i}$ from Schnute (1983) is the same as X from Carle and Strub (1978), the $\sum_{i=1}^{k-1}T_{i}-C_{1}$ in Schnute (1983) is the same as X − (k−1)C1, and q in Schnute (1983) is p in most other resources. These changes allowed some efficiencies and connected the theory behind the methods more firmly. Removed the check for character data. Kept the check for whether catch was a vector or not but if catch is a one row or one column matrix or data.frame then it will be converted to a vector to continue. The latter change allows one to extract one row from a data.frame to send to removal() without having to use as.numeric(). Modified and added examples of the use of apply() and lapply().
- +

FSA 0.4.22

  • +Date: Aug14
  • +
  • ageKey(): Modified. Changed to using all.equal() to check if the ALK has rows that don’t sum to 1. This was an attempt to minimize the number of “false negatives” caused by R FAQ 7.31. Changed the check of whether the longest fish in the length sample is greater than the longest length bin in the ALK to whether the longest fish in the length sample is greater than the longest length bin in ALK PLUS the minimum width of length categories. This last change is an attempt to minimize the number of warnings that occur when the longest fish in the length sample would be in the last length category ALK but because the length categories are labelled by their minimum length it looks like it is not. The minimum width is used to still allow uneven length categories and, thus, this check may still produce some “false negatives.”
  • ageKeyPlot(): Modified. Removed bubble.ylab=. Modified ylab= to handle what bubble.ylab= used to handle.
  • @@ -1738,8 +1895,10 @@
- +

FSA 0.4.21

  • +Date: Jul14
  • +
  • depletion(): Modified. Changed type= to method= and added DeLury as an option to method= (and left Delury). Changed ricker.mod= to Ricker.mod=. Added some checking for bad arguments. Created internal functions specific to the Leslie and DeLury methods (for isolation). Modified some clunky code. Added references to specific sections in Seber (2002) for SE equations. Updated examples. Added tests and error checking.
  • coef.depletion(): Modified. Added digits=.
  • @@ -1751,13 +1910,17 @@
- +

FSA 0.4.20

  • +Date: Jul14
  • +
  • removal(): Modified. Made "CarleStrub" the default method. Changed type= to method=. Changed internal meth object to lbl. Moved all internal functions outside of mrOpen() environment and added other internal functions to isolate all intermediate calculations. Added a verbose= and parm= to summary(). Streamlined clunky code in confint() including removing the all and both options from parm=. Added more checks for bad inputs, notes in the code as to sources for the formulae, and tests.
- -
  • Modified some tests to check whether the suggested package was installed.
  • +

    FSA 0.4.19

    +
    • +Date: Jul14
    • +
    • Modified some tests to check whether the suggested package was installed.
    • capHistSum(): Modified. Changed column and row labels for $methodB.top and column labels for $methodB.bot. Added a m-array object for when more than two sampling events are present. Added calculations for the number of fish first seen on event i (ui), the number of fish last seen on event i (vi), and the number of fish seen i times (fi) to $sum.
    • @@ -1772,8 +1935,10 @@
- -
  • Moved to compiling under R 3.1.1.
  • +

    FSA 0.4.18

    +
    • +Date: Jul14
    • +
    • Moved to compiling under R 3.1.1.
    • Added a Suggests for marked for the example in capHistConvert().
    • ageBias(): Modified. Changed default value of min.n.CI= from 5 to 3. Added an na.rm=TRUE to the min() and max() that produced the age ranges for the age agreement table.
    • @@ -1795,8 +1960,10 @@
- +

FSA 0.4.17

  • +Date: Jul14
  • +
  • confint.mrClosed(): Modified. Moved all internal functions outside of confint.mrClosed() environment (see iCI.MRCMultiple() and iCI.MRCSingle()). Changed ci.type= to just type=. Streamlined binomial method for single census. Used iMRCSingleSE() to get SE for when type="normal" for Chapman, Bailey, and Ricker methods.
  • extraSS(): Modified. Slight change to row labels in output table.
  • @@ -1816,8 +1983,10 @@
- +

FSA 0.4.16

  • +Date: Jul14
  • +
  • BluegillLM: Modified. Added a seealso.
  • residPlot(): Modified. Changed the loess-related methods to use loess(), to put an approximate confident band with the line, the line and band are “under” the points, the line is lighter. Put the horizontal reference line at zero under the points. Made loess=TRUE the default.
  • @@ -1837,8 +2006,10 @@
- -
  • lots of roxygen2 Rd cleaning.
  • +

    FSA 0.4.15

    +
    • +Date: Jun14
    • +
    • lots of roxygen2 Rd cleaning.
    • addLoessLine(): Deleted. Moved functionality to iAddLoessLine() and moved code to residPlot() file..
    • @@ -1944,8 +2115,10 @@
- -
  • added tests (in test_VonB2b.R) to assure that group comparisons of von Bertalanffy parameters equal those in Kimura (1980) and vblrt() in fishmethods.
  • +

    FSA 0.4.14

    +
    • +Date: Jun14
    • +
    • added tests (in test_VonB2b.R) to assure that group comparisons of von Bertalanffy parameters equal those in Kimura (1980) and vblrt() in fishmethods.
    • added importsFrom for lmtest for lrt(). Also used in testing (test_VonB2b.R).
    • confint.nlsBoot(): Modified. Modified the plotting to use hist.formula(), removed par(mar=) definitions, and added err.col= and lwd.col= to control the color and line width of the confidence interval line on the plot.
    • @@ -1965,8 +2138,9 @@
- -
  • added testthat files for error checking of chapmanPlot(), vbFuns(), vbStarts(), and walfordPlot(). Added a testthat file for checking that the von Bertalanffy fitting using vbFuns() and vbStarts() matches other sources.

  • +

    FSA 0.4.13

    +
    • Date: Jun14

    • +
    • added testthat files for error checking of chapmanPlot(), vbFuns(), vbStarts(), and walfordPlot(). Added a testthat file for checking that the von Bertalanffy fitting using vbFuns() and vbStarts() matches other sources.

    • ageBias(): Modified. Deprecated col.lab= and row.lab= and replaced with ref.lab= and nref.lab=. Moved all functions that were internal to main functions to being internal to the package. In the process, I changed the names of the internal functions slightly, made explicit the argument passing, and added internal descriptions of the internal files. Changed several if else strings in the plot method to a switch().

    • agePrecision(): Modified. Changed some messages so they were not as wide.

    • chapmanPlot(): Modified. Removed S3 functions so that vbStarts() has to use a formula. Added some checking related to the formula.

    • @@ -1980,8 +2154,9 @@
- -
  • added Suggests for testthat, fishmethods, FSAdata for testing and popbio for an example that was made “interactive” from “dont run”(see below).

  • +

    FSA 0.4.12

    +
    • Date: May14

    • +
    • added Suggests for testthat, fishmethods, FSAdata for testing and popbio for an example that was made “interactive” from “dont run”(see below).

    • added testthat files for ageBias() and agePrecision().

    • ageBias(): Modified. Removed unit testings from examples and put in the testing file.

    • agePrecision(): Modified. Removed deprecated what="agreement".

    • @@ -1993,15 +2168,16 @@
- -
  • Removed Roxygen directives in DESCRIPTION (with changes to roxygen2 4.0.1).

  • +

    FSA 0.4.11

    +
    • Date: May14

    • +
    • Removed Roxygen directives in DESCRIPTION (with changes to roxygen2 4.0.1).

    • Changed @S3method and @method to @export in the following files according to changes in ROxygen2 as described here, among several other places: ageBias, agePrecision, bootCase, catchCurve, chapmanRobson, confint.nlsboot, depletion, dietOverlap, fitPlot, hist.formula, htest.nlsBoot, ks2d1, ks2d1p, ks2d2, ks2d2p, ksTest, lencat, mrClosed, mrOpen, plotBinResp, predict.nlsBoot, removal, residPlot, srStarts, Subset, Summarize, sumTable, vbStarts, and walfordChapmanPlot.

    • addZeroCatch(): Modified. Added a catch for the situation where no zeros need to be added to the data.frame. Cleaned-up the help file, modified the examples, and added another example. Thanks to Ben Neely for bringing this bug (handling where zeros are not needed) to my attention.

    • capHistSum(): Modified. Cleaned up the code (no changes in functionality).

    • @@ -2012,8 +2188,9 @@
- -
  • Added Roxygen directives to DESCRIPTION.

  • +

    FSA 0.4.10

    +
    • Date: May14

    • +
    • Added Roxygen directives to DESCRIPTION.

    • Updated to Roxygen2 4.0.0 which modified several help files.

    • ageBias(): Modified. Cleaned-up the help file.

    • agePrecision(): Modified. Cleaned-up the help file.

    • @@ -2027,8 +2204,9 @@
- -
  • Removed nlme dependencies (with removal of vbDataGen()).

  • +

    FSA 0.4.9

    +
    • Date: May14

    • +
    • Removed nlme dependencies (with removal of vbDataGen()).

    • ageComp(): Deleted. Fully deprecated. Use ageBias() and agePrecision() instead.

    • cohortSim(): Deleted. Moved to FSAsim package.

    • depletion(): Modified. Remove link to leslieSim().

    • @@ -2044,8 +2222,10 @@
- +

FSA 0.4.8

  • +Date: May14
  • +
  • ageBias(): Modified. Added the ability to use multiple what= arguments with c(). Added what="n" to get the sample size on the age-agreement table. Added nYpos= to plot() to allow control of the position of the sample size values on the plot. Changed the order of the printing of results when what="symmetry" is used in summary(). The order more closely follows the “level of complexity” of the tests. Added unit test examples to the help file.
  • agePrecision(): Modified. Added the ability to use multiple what= arguments with c().
  • @@ -2053,8 +2233,9 @@
- -
  • Removed all of the functions related to constructing and validating standard weight equations. These are now in the FSAWs package. This is the start of an effort to streamline the FSA package.

  • +

    FSA 0.4.7

    +
    • Date: Apr14

    • +
    • Removed all of the functions related to constructing and validating standard weight equations. These are now in the FSAWs package. This is the start of an effort to streamline the FSA package.

    • Removed importFrom quantreg (only used for standard weight methods).

    • ChinookArg: Added (from FSAdata).

    • emp(): Removed.

    • @@ -2069,8 +2250,9 @@
- -
  • Changed to compiling under R 3.1.0

  • +

    FSA 0.4.6

    +
    • Date: Apr14

    • +
    • Changed to compiling under R 3.1.0

    • Imported stackpoly() from plotrix for use in ageKeyPlot().

    • Added concepts (that largely match those in the FSAdata package) to most of the data files.

    • Made some grammatical changes and added author sections to Rd files.

    • @@ -2080,8 +2262,9 @@
- -
  • Converted to using github as a repository.

  • +

    FSA 0.4.5

    +
    • Date: Apr14

    • +
    • Converted to using github as a repository.

    • Changed NEWS to NEWS.md

    • Added ImportFrom for relax package (see below).

    • ageBias(): Modified. Added a plot that shows the number of observations at each combined age. Changed the coding slightly around Bowker’s test (added an internal function) and implemented Evans and Hoenig’s and McNemar’s test. These changes resulting in adding a “table” choice to what= that will print just the age-agreement table. When what="symmetry" is chosen all three ob Bowker’s, McNemar’s, and Evans-Hoenig results will be output as a table. The age-agreement table is no longer printed when what="symmetry". In addition, what="Bowkers", what="EvansHoenig", and what="McNemars" can be used to see the Bowker’s, Evans and Hoenig, and McNemars test results, respectfully. Added a cont.corr= argument for use with McNemars test.

    • @@ -2099,8 +2282,10 @@
- +

FSA 0.4.4

  • +Date: Apr14
  • +
  • ageKeyPrep(): Added.
  • agePrecision(): Modified. Fixed the bug where the APE and CV were over-estimated in situations where the multiple ages agreed at an age=0 (thanks to Richard McBride for pointing out this error).
  • @@ -2108,8 +2293,10 @@
- +

FSA 0.4.3

  • +Date: Mar14
  • +
  • ageBias(): Added. Extracted the age-bias related material from ageComp(). Modified the code to remove unneeded code. From ageComp(), remove the what= argument related to differences and added a difference= argument. Also changed what="bias.diff" to what="diff.bias" to allow for a quicker partial matching (i.e. separate more from what="bias"). Major modifications to how the axis limits are created if none are provided. Modified where the sample size is shown on the age-bias plot. Added the min.n.CI= argument. Added an example using WhitefishLC to be consistent with agePrecision().
  • ageComp(): Modified. Split into ageBias() and agePrecision(). Added a warning that this function is deprecated and will be removed in the future.
  • @@ -2189,8 +2376,9 @@
- -
  • Changed to compiling under R 3.0.2.

  • +

    FSA 0.4.2

    +
    • Date: Dec13

    • +
    • Changed to compiling under R 3.0.2.

    • Removed dependency on reshape package (see changes for emp(), gReshape(), and ssValidate() below) and the relax, tcltk, and TeachingDemos packages (see changes for catchCurveSim(), cohortSim(), growthModelSim(), leslieSim(), lwModelSim(), mrClosed1Sim(), simAgeBias(), simAges(), simLenFromAge(), simLenSelect(), and srSim() below).

    • .onAttach(): Modified. Added notes to use citation().

    • bcFuns(): Modified. Added “BPH” and “SPH” options to type= argument (same as “LBPH” and “LSPH”, respectively). Changed a catch using cat() to using message(). Added some specificity to the help file (more is needed).

    • @@ -2217,8 +2405,9 @@
- -
  • Changed R dependency to >3.0.0 (because gplots package has that dependency).

  • +

    FSA 0.4.1

    +
    • Date: Oct13

    • +
    • Changed R dependency to >3.0.0 (because gplots package has that dependency).

    • Added importFrom for cast(), is.formula(), and melt() in reshape package.

    • capHistConvert(): Corrected the formatting of the documentation.

    • capHistSum(): Corrected the documentation. Added a second example.

    • @@ -2237,12 +2426,13 @@
- -
  • Corrected all pointers to fishR vignettes (because of new webpage).

  • +

    FSA 0.4.0

    +
    • Date: Jun13

    • +
    • Corrected all pointers to fishR vignettes (because of new webpage).

    • Removed importFrom color.scale from plotrix because of changes to discharge() and wetPerim().

    • removed importFrom %nin% from Hmisc. See multiple changes because of this below.

    • -
    • .onAttach(): Added, was .onLoad().

    • -
    • .onLoad(): Deleted, now .onAttach().

    • +
    • .onAttach(): Added, was .onLoad().

    • +
    • .onLoad(): Deleted, now .onAttach().

    • addMargins(): Deleted, moved back to NCStats.

    • addSigLetters(): Deleted, moved back to NCStats.

    • addZeroCatch(): Modified. Changed the looping structure for finding the sampling event and species combinations that need zeros. This should speed things up substantially. Also, modified to allow no idvar= variables. Finally, the returned data frame has the variables (columns) in the same order as the original data frame (rather than having the order modified).

    • @@ -2254,7 +2444,7 @@
- -
  • added special “fishR Vignette” sections with links to several help files.

  • +

    FSA 0.3.4

    +
    • Date: Jan13

    • +
    • added special “fishR Vignette” sections with links to several help files.

    • binCI(): Modified so that result is a matrix rather than sometimes (when only one set of CIs were computed) being a vector.

    • catchCurve(): Modified by minorly adjusting how confint() produced CIs. Also, disallowed using parm= when the user asks for CIs for the linear model. This allowed using match.arg() as a check for appropriate parm= values. Modified the examples in the help file slightly and added an example of using the weighted regression method.

    • plot.catchCurve(): Modified so that log(catch) values less than 0 will be plotted.

    • @@ -2307,11 +2498,12 @@
- -
  • Added ImportFrom for slider() and gslider() from the relax package. Deleted the ImportFrom for slider() from the TeachingDemos package. These functions were the same but it was being deprecated from TeachingDemos.

  • +

    FSA 0.3.3

    +
    • Date: 21Dec12

    • +
    • Added ImportFrom for slider() and gslider() from the relax package. Deleted the ImportFrom for slider() from the TeachingDemos package. These functions were the same but it was being deprecated from TeachingDemos.

    • General: added call.=FALSE to several stop()s and warning()s.

    • General: replaced paste() inside of several cat()s.

    • ageKey(): Modified to use match.arg() with type=.

    • @@ -2348,13 +2540,14 @@
- -
  • Changed R dependency to >2.14.0.

  • +

    FSA 0.3.2

    +
    • Date: 1Dec12

    • +
    • Changed R dependency to >2.14.0.

    • Added a ImportsFrom for knitr (purl() in swvCode() added below).

    • Moved gdata to an ImportsFrom from Suggests. Needed for nobs() in ci.fp1() which is used in fitPlot.ONEWAY and drop.levels() used in the example in RuffeWs.

    • Deleted dependency on FSAdata.

    • Added the following data files from FSAdata: BluegillJL, BluegillLM, BrookTroutTH, CodNorwegian, CutthroatAL, Ecoli, KS2D_NR, LMBassWs, Mirex, PikeNY, PikeNYPartial1, RSDlit, RuffeWs, SMBassLS, SMBassWB, SpotVA1, StripedBass1, VBGMlit, WalleyeGerowLW, WR79, WSlit. This allowed removing the depending on FSAdata.

    • -
    • .onLoad(): modified slightly with a suggestion from Simon Urbanek to eliminate a warning on RCMD Check (that showed up on rforge.net, but not locally).

    • +
    • .onLoad(): modified slightly with a suggestion from Simon Urbanek to eliminate a warning on RCMD Check (that showed up on rforge.net, but not locally).

    • addMargins(): added from NCStats.

    • addSigLetters(): added from NCStats. Modified to allow the use of a result from sigLetters() in lets=.

    • bootCase() methods: added from NCStats. Needed to import bootCase from car.

    • @@ -2365,12 +2558,13 @@
- -
  • Switched to using the Project mode in RStudio.

  • +

    FSA 0.3.1

    +
    • Date: 25Nov12

    • +
    • Switched to using the Project mode in RStudio.

    • Switched to using semantic versioning for the version number (which means that the hyphen before the last number has been replaced by a period).

    • Switched to using roxygen to construct help files.

    • Set some values =NULL to eliminate “global bindings” warning when performing the RCMD check – emp(), pos2adj(), psdVal(), simAgeBias(), srStarts(), vbStarts(), and wsValidate(). This did not work for the WSlit and RSDlit problems in rsdCalc(), rsdPlot(), rsdVal() and wsVal().

    • @@ -2416,8 +2610,9 @@
- -
  • Moved several functions from NCStats that are used quite often for fisheries analyses. Ultimately, I want to remove the dependency to NCStats.

  • +

    FSA 0.3-0

    +
    • Date: 8-Nov-12

    • +
    • Moved several functions from NCStats that are used quite often for fisheries analyses. Ultimately, I want to remove the dependency to NCStats.

    • Deleted an importFrom for gtools, created an internal odd() instead.

    • Added an importFrom for gplots, to get rich.colors() for chooseColors().

    • Added an importFrom and removed an import for NCStats.

    • @@ -2444,8 +2639,9 @@
- -
  • Switched to compiling under R version 2.14.1 (64-bit).

  • +

    FSA 0.2-8

    +
    • Date: 21Jun12

    • +
    • Switched to compiling under R version 2.14.1 (64-bit).

    • Changed license specification from “GPL version 2 or newer” to “GPL (>= 2)” to avoid warning on check.

    • Added a suggestion for gdata to fix warning with capHistConver.rd (see below).

    • capHistConvert.rd: Modified the examples to not use “gdata::combine” by adding a “require(gdata)” in the examples and suggesting gdata in the description file.

    • @@ -2454,9 +2650,11 @@
- +

FSA 0.2-7

  • -.onLoad(): Modified. Moved the startup message into packageStartupMessage() in hopes of eliminating the warning when checking the package.
  • +Date: 2Mar12 +
  • +.onLoad(): Modified. Moved the startup message into packageStartupMessage() in hopes of eliminating the warning when checking the package.
  • catchCurveSim(): Modified. Changed max.age= to 15 (from 10). Slightly changed the labels related to ‘Z Steady’ and ‘N Steady’.
  • @@ -2477,8 +2675,9 @@
- -
  • Switched to compiling under R version 2.13.1 (32-bit).

  • +

    FSA 0.2-6

    +
    • Date: 1Oct11

    • +
    • Switched to compiling under R version 2.13.1 (32-bit).

    • Removed importFroms that were required for updateFSA().

    • Removed splines package from imports list (not needed).

    • capHistConvert(): Modified. Modifications to handle changes to capHistSum().

    • @@ -2490,8 +2689,9 @@
- -
  • Modified description file to show my e-mail address.

  • +

    FSA 0.2-5

    +
    • Date: 19Aug11

    • +
    • Modified description file to show my e-mail address.

    • Added cnvrt.coords() as an ImportFrom TeachingDemos. Needed for simAgeBias() and simLenSelectM().

    • ageKey(): Modified. Length categories in the length sample, if none are provided in len.breaks=, are constructed from the length categories present in the age-length key rather than guessing at a starting value and width and creating evenly spaced categories. This should fix the bug that occurred when an age-length key was originally created with even length categories but the key is so sparse that the length categories with actual data are uneven. Also, changed the error catching so that the routine is stopped if a length in the length sample is smaller than the smallest length category in the age length key but will only elicit a warning if the largest length is greater than the largest length category in the age-length key.

    • chapmanRobson(): Modified. Changed to have a .default and .formula method.

    • @@ -2509,13 +2709,15 @@
- -
  • Switched to compiling under R version 2.13.0.

  • +

    FSA 0.2-4

    +
    • Date: 15Jun11

    • +
    • Switched to compiling under R version 2.13.0.

    • vbFuns(): Modified. Modified Wang’s formulas to be for length increments. Added a length increments version to Faben’s method (“Fabens2”).

- -
  • Updated citation file.

  • +

    FSA 0.2-3

    +
    • Date: 18Apr11

    • +
    • Updated citation file.

    • Added importFrom for tools and utils packages.

    • ageKey(): Modified. Added a len.breaks= argument so that an age-length key with variable widths for the length categories can be used. Added an example to the Rd file to illustrate the use.

    • confint.MRC(): Modified. Replaced numdigs= argument with digits= argument. Retained numdigs= for backwards compatability.

    • @@ -2527,29 +2729,34 @@
- -
  • moved to compling under 2.12.1 (32-bit)

  • +

    FSA 0.2-2

    +
    • Date: 3Mar11

    • +
    • moved to compling under 2.12.1 (32-bit)

    • changed dependency to >2.11.1

    • ageComp(): modified dramatically. Primarily added the ability to test for bias by comparing the mean of the y-structure to the value of the x-structure with t-tests adjusted for multiple comparisons. Modified the code to allow this to happen more efficiently and to output results in the plot() and summary() methods. Also modified the plot() method so that the default is to just show the confidence intervals rather than showing the CIs and the range of the data (use show.rng=TRUE to see the old plot). Also changed the CI colors so that significant differences are shown in red (default) and non-significant differences are shown in blue (default) (set both col.err= and col.err.sig= to the same color to get the old plot).

    • lencat(): modified so that vname=NULL is the default. This will default to using “LCat” as the variable name (as in the previous version). However, modified the way the vname is appended to the new data frame so that if vname already exists in the data frame a new name will be used (vname plus some number).

    • removal(): added just.ests= argument and changed the ests part of the returned value to be a vector rather than a matrix. Both changes allowed for better use of lapply() for computing the removal estimates on more than one group. Changed from an error to a warning for situations where the method could not compute population estimates (i.e., because the population was not depleted). In addition, NAs are returned in situations where population estimates can not be made. An example of computing the removal estimate for more than one group was added to the .rd file. Thanks to Jon Bolland for asking the question that motivated these changes.

- +

FSA 0.2-1

  • +Date: 31-Jan-11
  • +
  • catchCurve(): Modified by adding a formula method. This required moving the original code into a default method and changing the age= argument to x=.
  • lenFreqExpand(): Modified by adding the additional= argument (which required modifying the total= argument and adding an error check for the situation where the total fish to assign lengths is not greater than the number of fish in the measured subsample).
  • -.onLoad(): modified. Changed to include version number of loaded version.
  • +.onLoad(): modified. Changed to include version number of loaded version.
  • vbFuns(): Modified by adding simple= argument. Added a ‘Somers’ seasonal growth oscillations model and ‘Fabens’ model for tag-recapture data. Also added, but did not check, a ‘Laslett’ ‘double von Bertalanffy’ model.
    • vbStarts(): Modified by setting a catch to return a single root for st0 or sL0 if the polynomial root found a double root. Thanks to Giacomo Tavecchia for identifying this error. Added a ‘Somers’ seasonal growth oscillations model.
- +

FSA 0.2-0

  • +Date: 23-Sep-10
  • +
  • bcFuns(): Added. Still needs to be thoroughly proofed.
  • FSAsims(): Modified to reflect srSim() change described below.
  • @@ -2565,8 +2772,10 @@
- -
  • completed changing naming convention to “camel” type – e.g., stockRecruitModels() rather than stock.recruit.models().
  • +

    FSA 0.1-6

    +
    • +Date: 23-Aug-10
    • +
    • completed changing naming convention to “camel” type – e.g., stockRecruitModels() rather than stock.recruit.models().
    • ageComp(): renamed from age.comp().
    • @@ -2643,8 +2852,9 @@
- -
  • moved to compiling under 2.11.1.

  • +

    FSA 0.1-5

    +
    • Date: 20Aug10

    • +
    • moved to compiling under 2.11.1.

    • started changing my naming convention to “camel” type – e.g., stockRecruitModels() rather than stock.recruit.models(). In this version, I am only changing the functions that I am working on. I will change the rest in the next version.

    • added an importFrom for nlme as groupedData() was needed for vbDataGen().

    • age.key(): Modified the way that the length categories in the age-length key is determined. Previously I just used the rownames found in the key, but this allows lengths with a row of all NA or zeros to be considered as a length found in the age length key. Now the row sums are found and the sums with NaN or 0 are removed. In addition, I added a warning message if the row sums do not sum to 1.

    • @@ -2665,13 +2875,16 @@
- +

FSA 0.1-4

  • +Date: 6Jun10
  • +
  • growmodel.sim(): added an option to fit the “original” von Bertalanffy function. Also added more “mis-spelling” options to the other model names.
- -
  • moved to compiling under 2.10.1.

  • +

    FSA 0.1-2

    +
    • Date: 17Dec09

    • +
    • moved to compiling under 2.10.1.

    • added a dependency to tcltk so that simulators would work properly upon load of FSA.

    • age.comp(): added xlim= and ylim= arguments so user can control x- and y-axis limits if desired. Changed code so that better choices for axis limits are selected automatically if xlim and ylim are both NULL. Changed code so that the “extra” vertical space added when show.n=TRUE AND ylim is NLL is 10 percent of the y-axis range rather than just an extra one unit. Allowed function to work better with xaxt=“n” and yaxt=“n” in case the user wants to create their own axes. Removed a par() setting within the plotting function. Thanks to David A. Hewitt for pointing out the deficiences with the axis labeling.

    • age.key(): corrected how the age column is labeled if the column did not already exist in the data frame. Was also indirectly modified with lencat() modification. Also modified to stop and warn the user if the length sample has fish whose lengths are not present in the length-age key (previously there was a warning, but then ultimately there was an error).

    • @@ -2685,8 +2898,10 @@
- -
  • added a namespace
  • +

    FSA 0.1-1

    +
    • +Date: 15Apr09
    • +
    • added a namespace
    • removed dependencies and changed to imports … left plotrix and quantreg as dependencies (they do not have a namespaces). left reshape as a dependency because of it’s dependency on plyr.
    • .FirstLib(): removed (changed to .onLoad() because of namespace).
    • @@ -2708,15 +2923,18 @@
- +

FSA 0.0-14

  • +Date: 20Dec08
  • +
  • age.comp(): streamlined code (put bias and difference plots in same function, used grconvertY for show.n, used plotCI for range intervals, caught and corrected CI problems when n=1 or SD=0). No functionality difference, just improved code.
  • growmodel.sim(): modified by determining some of the slider values from the data when x= and y= arguments are not null. This makes the graph more useful for determining starting values in nls() modeling.
- -
  • added a dependency to quantreg (for rq() in emp()).

  • +

    FSA 0.0-13

    +
    • Date: 6Dec08

    • +
    • added a dependency to quantreg (for rq() in emp()).

    • added CITATION file.

    • age.comp(): modified the plot() function by adding a ‘difference’ method to the what= argument. This allows creation of an “age-difference” plot as used in Muir et al. (2008).

    • caphist.convert(): modified by adding an event.ord= argument to allow the user to identify the order of the event names when converting from a capture-by-event type. This is particulary useful if the event names are things like ‘first’, ‘second’, ‘third’, ‘fourth’ because R orders these alphabetically which adversely effects the correctness of the capture histories.

    • @@ -2732,8 +2950,10 @@
- +

FSA 0.0-12

  • +Date: 15Jul08
  • +
  • .First.lib: Added
  • add.zerocatch(): added this function to add zeros to catch records where a species of fish was not caught.
  • @@ -2749,8 +2969,9 @@
- -
  • Moved to RForge.net.

  • +

    FSA 0.0-11

    +
    • Date: 15May08

    • +
    • Moved to RForge.net.

    • changed to R2.7.0.

    • added a dependency to Rcapture (for the example in caphist.convert()).

    • anova.RLP(): added this function to produce the anova table for the standard weight equation.

    • @@ -2761,8 +2982,10 @@
- +

FSA 0.0-10

  • +Date: 1May08
  • +
  • lencat(): Modified by adding an as.fact= argument that allows the user to decide if the resulting variable should be returned as a factor variable or not. The default is set to return as a factor variable. This allows tables of the new variable to include zeros for levels of the new variable that contain no individuals. This makes some RSD/PSD (and likely age-length key) calculations simpler. Also added a drop.levels= argument to allow the user to drop unused levels if so desired.
  • mr.closed(): This function is a combination of the old mr.closed1() and mr.closed2(). It also allows the user to compute single census estimates with multiple sub-groups in the data (i.e., length- or age-classes). The function also allows the user to compute an overall population esitmate of multiple sub-groups are present and an overall SE if the incl.SE=TRUE is used. It also corrects the SE computations implemented in version 0.0-9. This change caused the construction of our internal functions – mrc1, mrc2, ci.mrc1, and ci.mrc2.
  • @@ -2778,8 +3001,10 @@
- +

FSA 0.0-9

  • +Date: unknown
  • +
  • age.comp(): Corrected SE calculation used to construct the CIs. Changed the CI plotting routine to use plotCI in plotrix package – this puts lines rather than points on the ends of the CIs. Added a check for computing SDs and CIs for when n=1 or when all measurements are the same. This reduces (eliminates?) the number of warnings that are given.
  • catch.curve(): added na.rm=TRUE arguments to min() and max() in plot.CC(). Changed type= argument so that “params” is the default rather than “lm”. This makes it more consistent with other simulation programs.
  • @@ -2801,8 +3026,9 @@
- -
  • changed some to in RD files. Changed most hard-wired quotes to or in RD files. Changed some text-based equations to more latex-based equations in or markups. This fixed the Latex compilation problems that I was having when using RCMD check.

  • +

    FSA 0.0-8

    +
    • Date: unknown

    • +
    • changed some to in RD files. Changed most hard-wired quotes to or in RD files. Changed some text-based equations to more latex-based equations in or markups. This fixed the Latex compilation problems that I was having when using RCMD check.

    • age.comp(): Removed single-letter values from the what= argument. Will rely on partial matching.

    • age.key(): Changed default name for new column with ages from “Age” to “age”. Added example.

    • coefplot.WLGM(): Changed to use plotCI() from the plotrix package. This removed the for loop that I had programmed. This also added the sfrac= and gap= arguments. Updated the RD.

    • @@ -2819,8 +3045,9 @@
- -
  • changed to compiling under R 2.6.1.

  • +

    FSA 0.0-7

    +
    • Date: unknown

    • +
    • changed to compiling under R 2.6.1.

    • added FSA.R file that loads the required librarys.

    • now depends on MASS package because of the creation of the boxcox.WLGM() function and on the plotrix package for elements of ycplot().

    • add.radcap(): Created this function to add the radius-at-capture to a one-fish-per-line data frame of increments.

    • @@ -2841,8 +3068,10 @@
- +

FSA 0.0-6

  • +Date: unknown
  • +
  • agebias.plot(): deleted and replaced with agecomp and plot.AgeComp functions.
  • agesunflower.plot(): deleted and replaced with agecomp and plot.AgeComp functions.
  • @@ -2879,28 +3108,22 @@
-
+
- -
- - -
+
- diff --git a/docs/pkgdown.js b/docs/pkgdown.js index 6f0eee40..5fccd9c0 100644 --- a/docs/pkgdown.js +++ b/docs/pkgdown.js @@ -2,70 +2,30 @@ (function($) { $(function() { - $('.navbar-fixed-top').headroom(); + $('nav.navbar').headroom(); - $('body').css('padding-top', $('.navbar').height() + 10); - $(window).resize(function(){ - $('body').css('padding-top', $('.navbar').height() + 10); + Toc.init({ + $nav: $("#toc"), + $scope: $("main h2, main h3, main h4, main h5, main h6") }); - $('[data-toggle="tooltip"]').tooltip(); - - var cur_path = paths(location.pathname); - var links = $("#navbar ul li a"); - var max_length = -1; - var pos = -1; - for (var i = 0; i < links.length; i++) { - if (links[i].getAttribute("href") === "#") - continue; - // Ignore external links - if (links[i].host !== location.host) - continue; - - var nav_path = paths(links[i].pathname); - - var length = prefix_length(nav_path, cur_path); - if (length > max_length) { - max_length = length; - pos = i; - } - } - - // Add class to parent
  • , and enclosing
  • if in dropdown - if (pos >= 0) { - var menu_anchor = $(links[pos]); - menu_anchor.parent().addClass("active"); - menu_anchor.closest("li.dropdown").addClass("active"); - } - }); - - function paths(pathname) { - var pieces = pathname.split("/"); - pieces.shift(); // always starts with / - - var end = pieces[pieces.length - 1]; - if (end === "index.html" || end === "") - pieces.pop(); - return(pieces); - } - - // Returns -1 if not found - function prefix_length(needle, haystack) { - if (needle.length > haystack.length) - return(-1); - - // Special case for length-0 haystack, since for loop won't run - if (haystack.length === 0) { - return(needle.length === 0 ? 0 : -1); + if ($('#toc').length) { + $('body').scrollspy({ + target: '#toc', + offset: $("nav.navbar").outerHeight() + 1 + }); } - for (var i = 0; i < haystack.length; i++) { - if (needle[i] != haystack[i]) - return(i); - } + // Activate popovers + $('[data-bs-toggle="popover"]').popover({ + container: 'body', + html: true, + trigger: 'focus', + placement: "top", + sanitize: false, + }); - return(haystack.length); - } + $('[data-bs-toggle="tooltip"]').tooltip(); /* Clipboard --------------------------*/ @@ -78,7 +38,7 @@ if(ClipboardJS.isSupported()) { $(document).ready(function() { - var copyButton = ""; + var copyButton = ""; $("div.sourceCode").addClass("hasCopyButton"); @@ -89,20 +49,108 @@ $('.btn-copy-ex').tooltip({container: 'body'}); // Initialize clipboard: - var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', { + var clipboard = new ClipboardJS('[data-clipboard-copy]', { text: function(trigger) { return trigger.parentNode.textContent.replace(/\n#>[^\n]*/g, ""); } }); - clipboardBtnCopies.on('success', function(e) { + clipboard.on('success', function(e) { changeTooltipMessage(e.trigger, 'Copied!'); e.clearSelection(); }); - clipboardBtnCopies.on('error', function() { + clipboard.on('error', function() { changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); }); + }); } + 
+ /* Search marking --------------------------*/ + var url = new URL(window.location.href); + var toMark = url.searchParams.get("q"); + var mark = new Mark("main#main"); + if (toMark) { + mark.mark(toMark, { + accuracy: { + value: "complementary", + limiters: [",", ".", ":", "/"], + } + }); + } + + /* Search --------------------------*/ + /* Adapted from https://github.com/rstudio/bookdown/blob/2d692ba4b61f1e466c92e78fd712b0ab08c11d31/inst/resources/bs4_book/bs4_book.js#L25 */ + // Initialise search index on focus + var fuse; + $("#search-input").focus(async function(e) { + if (fuse) { + return; + } + + $(e.target).addClass("loading"); + var response = await fetch($("#search-input").data("search-index")); + var data = await response.json(); + + var options = { + keys: ["what", "text", "code"], + ignoreLocation: true, + threshold: 0.1, + includeMatches: true, + includeScore: true, + }; + fuse = new Fuse(data, options); + + $(e.target).removeClass("loading"); + }); + + // Use algolia autocomplete + var options = { + autoselect: true, + debug: true, + hint: false, + minLength: 2, + }; + var q; +async function searchFuse(query, callback) { + await fuse; + + var items; + if (!fuse) { + items = []; + } else { + q = query; + var results = fuse.search(query, { limit: 20 }); + items = results + .filter((x) => x.score <= 0.75) + .map((x) => x.item); + if (items.length === 0) { + items = [{dir:"Sorry 😿",previous_headings:"",title:"No results found.",what:"No results found.",path:window.location.href}]; + } + } + callback(items); +} + $("#search-input").autocomplete(options, [ + { + name: "content", + source: searchFuse, + templates: { + suggestion: (s) => { + if (s.title == s.what) { + return `${s.dir} >
    ${s.title}
    `; + } else if (s.previous_headings == "") { + return `${s.dir} >
    ${s.title}
    > ${s.what}`; + } else { + return `${s.dir} >
    ${s.title}
    > ${s.previous_headings} > ${s.what}`; + } + }, + }, + }, + ]).on('autocomplete:selected', function(event, s) { + window.location.href = s.path + "?q=" + q + "#" + s.id; + }); + }); })(window.jQuery || window.$) + + diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml index 46239344..21e98315 100644 --- a/docs/pkgdown.yml +++ b/docs/pkgdown.yml @@ -1,8 +1,8 @@ -pandoc: 2.14.0.3 -pkgdown: 2.0.2 +pandoc: 2.19.2 +pkgdown: 2.0.6 pkgdown_sha: ~ articles: {} -last_built: 2022-02-27T02:15Z +last_built: 2022-12-19T16:19Z urls: reference: https://fishr-core-team.github.io/FSA/reference article: https://fishr-core-team.github.io/FSA/articles diff --git a/docs/reference/BluegillJL.html b/docs/reference/BluegillJL.html index 987d3bed..6e994c16 100644 --- a/docs/reference/BluegillJL.html +++ b/docs/reference/BluegillJL.html @@ -1,66 +1,66 @@ -Capture histories (2 samples) of Bluegill from Jewett Lake, MI. — BluegillJL • FSACapture histories (2 samples) of Bluegill from Jewett Lake, MI. — BluegillJL • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Each line consists of the capture history over two samples of Bluegill (Lepomis macrochirus) in Jewett Lake (MI). This file contains the capture histories for only Bluegill larger than 6-in.

    -
    -

    Format

    +
    +

    Format

    A data frame with 277 observations on the following 2 variables.

    first

    a numeric vector of indicator variables for the first sample (1=captured)

    @@ -69,12 +69,12 @@

    Format

    -
    -

    Source

    -

    From example 8.1 in Schneider, J.C. 1998. Lake fish population estimates by mark-and-recapture methods. Chapter 8 in Schneider, J.C. (ed.) 2000. Manual of fisheries survey methods II: with periodic updates. Michigan Department of Natural Resources, Fisheries Special Report 25, Ann Arbor. [Was (is?) from http://www.michigandnr.com/publications/pdfs/IFR/manual/SMII%20Chapter08.pdf.]

    +
    +

    Source

    +

    From example 8.1 in Schneider, J.C. 1998. Lake fish population estimates by mark-and-recapture methods. Chapter 8 in Schneider, J.C. (ed.) 2000. Manual of fisheries survey methods II: with periodic updates. Michigan Department of Natural Resources, Fisheries Special Report 25, Ann Arbor. [Was (is?) from http://www.michigandnr.com/publications/pdfs/IFR/manual/SMII%20Chapter08.pdf.] CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Population Size

    • @@ -84,18 +84,18 @@

      Topic(s)

    • Petersen

    • Capture History

    -
    -

    See also

    +
    +

    See also

    Used in mrClosed examples.

    -
    -

    Examples

    -
    str(BluegillJL)
    +    
    +

    Examples

    +
    str(BluegillJL)
     #> 'data.frame':	277 obs. of  2 variables:
     #>  $ first : int  1 0 1 0 1 1 1 1 1 1 ...
     #>  $ second: int  0 1 0 1 0 0 0 0 0 0 ...
    -head(BluegillJL)
    +head(BluegillJL)
     #>   first second
     #> 1     1      0
     #> 2     0      1
    @@ -103,29 +103,25 @@ 

    Examples

    #> 4 0 1 #> 5 1 0 #> 6 1 0 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/BrookTroutTH.html b/docs/reference/BrookTroutTH.html index 9994b174..f3b370ed 100644 --- a/docs/reference/BrookTroutTH.html +++ b/docs/reference/BrookTroutTH.html @@ -1,66 +1,66 @@ -Catch-at-age for Tobin Harbor, Isle Royale Brook Trout. — BrookTroutTH • FSACatch-at-age for Tobin Harbor, Isle Royale Brook Trout. — BrookTroutTH • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Catch-at-age in fyke nets from 1996-1998 for “Coaster” Brook Trout (Salvelinus fontinalis) in Tobin Harbor, Isle Royale, Lake Superior.

    -
    -

    Format

    +
    +

    Format

    A data frame with 7 observations on the following 2 variables.

    age

    A numeric vector of assigned ages

    @@ -69,30 +69,30 @@

    Format

    -
    -

    Source

    -

    Quinlan, H.R. 1999. Biological Characteristics of Coaster Brook Trout at Isle Royale National Park, Michigan, 1996-98. U.S. Fish and Wildlife Service Ashland Fishery Resources Office report. November 1999. [Was (is?) from http://www.fws.gov/midwest/ashland/brook/biochar/biolchar.html.]

    +
    +

    Source

    +

    Quinlan, H.R. 1999. Biological Characteristics of Coaster Brook Trout at Isle Royale National Park, Michigan, 1996-98. U.S. Fish and Wildlife Service Ashland Fishery Resources Office report. November 1999. CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Mortality

    • Catch Curve

    • Chapman-Robson

    -
    -

    See also

    +
    +

    See also

    Used in catchCurve and chapmanRobson examples.

    -
    -

    Examples

    -
    str(BrookTroutTH)
    +    
    +

    Examples

    +
    str(BrookTroutTH)
     #> 'data.frame':	7 obs. of  2 variables:
     #>  $ age  : int  0 1 2 3 4 5 6
     #>  $ catch: int  39 93 112 45 58 12 8
    -head(BrookTroutTH)
    +head(BrookTroutTH)
     #>   age catch
     #> 1   0    39
     #> 2   1    93
    @@ -100,31 +100,27 @@ 

    Examples

    #> 4 3 45 #> 5 4 58 #> 6 5 12 -plot(log(catch)~age,data=BrookTroutTH) +plot(log(catch)~age,data=BrookTroutTH) - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/ChinookArg.html b/docs/reference/ChinookArg.html index e01545a9..cc118caa 100644 --- a/docs/reference/ChinookArg.html +++ b/docs/reference/ChinookArg.html @@ -1,66 +1,66 @@ -Lengths and weights for Chinook Salmon from three locations in Argentina. — ChinookArg • FSALengths and weights for Chinook Salmon from three locations in Argentina. — ChinookArg • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Lengths and weights for Chinook Salmon from three locations in Argentina.

    -
    -

    Format

    +
    +

    Format

    A data frame with 112 observations on the following 3 variables:

    tl

    Total length (cm)

    @@ -72,29 +72,29 @@

    Format

    -
    -

    Source

    -

    From Figure 4 in Soto, D., I. Arismendi, C. Di Prinzio, and F. Jara. 2007. Establishment of Chinook Salmon (Oncorhynchus tshawytscha) in Pacific basins of southern South America and its potential ecosystem implications. Revista Chilena d Historia Natural, 80:81-98. [Was (is?) from http://www.scielo.cl/pdf/rchnat/v80n1/art07.pdf.]

    +
    +

    Source

    +

    From Figure 4 in Soto, D., I. Arismendi, C. Di Prinzio, and F. Jara. 2007. Establishment of Chinook Salmon (Oncorhynchus tshawytscha) in Pacific basins of southern South America and its potential ecosystem implications. Revista Chilena d Historia Natural, 80:81-98. [Was (is?) from http://www.scielo.cl/pdf/rchnat/v80n1/art07.pdf.] CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Weight-Length

    -
    -

    See also

    +
    +

    See also

    Used in lwCompPreds examples.

    -
    -

    Examples

    -
    str(ChinookArg)
    +    
    +

    Examples

    +
    str(ChinookArg)
     #> 'data.frame':	112 obs. of  3 variables:
     #>  $ tl : num  120 115 111 110 110 ...
     #>  $ w  : num  17.9 17.2 16.8 15.8 14.3 13.8 12.8 11.7 12.8 14.8 ...
     #>  $ loc: Factor w/ 3 levels "Argentina","Petrohue",..: 1 1 1 1 1 1 1 1 1 1 ...
    -head(ChinookArg)
    +head(ChinookArg)
     #>      tl    w       loc
     #> 1 120.1 17.9 Argentina
     #> 2 115.0 17.2 Argentina
    @@ -102,35 +102,31 @@ 

    Examples

    #> 4 110.2 15.8 Argentina #> 5 110.0 14.3 Argentina #> 6 109.7 13.8 Argentina -op <- par(mfrow=c(2,2),pch=19,mar=c(3,3,0.5,0.5),mgp=c(1.9,0.5,0),tcl=-0.2) -plot(w~tl,data=ChinookArg,subset=loc=="Argentina") -plot(w~tl,data=ChinookArg,subset=loc=="Petrohue") -plot(w~tl,data=ChinookArg,subset=loc=="Puyehue") -par(op) +op <- par(mfrow=c(2,2),pch=19,mar=c(3,3,0.5,0.5),mgp=c(1.9,0.5,0),tcl=-0.2) +plot(w~tl,data=ChinookArg,subset=loc=="Argentina") +plot(w~tl,data=ChinookArg,subset=loc=="Petrohue") +plot(w~tl,data=ChinookArg,subset=loc=="Puyehue") +par(op) - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/CodNorwegian.html b/docs/reference/CodNorwegian.html index 7eef0670..00cd016a 100644 --- a/docs/reference/CodNorwegian.html +++ b/docs/reference/CodNorwegian.html @@ -1,66 +1,66 @@ -Stock and recruitment data for Norwegian cod, 1937-1960. — CodNorwegian • FSAStock and recruitment data for Norwegian cod, 1937-1960. — CodNorwegian • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Norwegian cod (Gadus morhua) stock and recruitment by year, 1937-1960.

    -
    -

    Format

    +
    +

    Format

    A data frame of 24 observations on the following 3 variables:

    year

    Year of data

    @@ -72,30 +72,30 @@

    Format

    -
    -

    Source

    -

    From Garrod, D.J. 1967. Population dynamics of the Arcto-Norwegian Cod. Journal of the Fisheries Research Board of Canada, 24:145-190.

    +
    +

    Source

    +

    From Garrod, D.J. 1967. Population dynamics of the Arcto-Norwegian Cod. Journal of the Fisheries Research Board of Canada, 24:145-190. CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Stock-Recruit

    • Recruitment

    -
    -

    See also

    +
    +

    See also

    Used in srStarts, srFuns, and nlsTracePlot examples.

    -
    -

    Examples

    -
    str(CodNorwegian)
    +    
    +

    Examples

    +
    str(CodNorwegian)
     #> 'data.frame':	24 obs. of  3 variables:
     #>  $ year    : int  1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 ...
     #>  $ recruits: int  146 31 17 26 43 58 113 75 99 70 ...
     #>  $ stock   : int  118 164 180 172 151 139 122 114 139 140 ...
    -head(CodNorwegian)
    +head(CodNorwegian)
     #>   year recruits stock
     #> 1 1937      146   118
     #> 2 1938       31   164
    @@ -103,34 +103,30 @@ 

    Examples

    #> 4 1940 26 172 #> 5 1941 43 151 #> 6 1942 58 139 -op <- par(mfrow=c(1,2),pch=19,mar=c(3,3,0.5,0.5),mgp=c(1.9,0.5,0),tcl=-0.2) -plot(recruits~year,data=CodNorwegian,type="l") -plot(recruits~stock,data=CodNorwegian) +op <- par(mfrow=c(1,2),pch=19,mar=c(3,3,0.5,0.5),mgp=c(1.9,0.5,0),tcl=-0.2) +plot(recruits~year,data=CodNorwegian,type="l") +plot(recruits~stock,data=CodNorwegian) -par(op) - +par(op) +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/CutthroatAL.html b/docs/reference/CutthroatAL.html index 4bceff7e..147f0a08 100644 --- a/docs/reference/CutthroatAL.html +++ b/docs/reference/CutthroatAL.html @@ -1,66 +1,66 @@ -Capture histories (9 samples) of Cutthroat Trout from Auke Lake. — CutthroatAL • FSACapture histories (9 samples) of Cutthroat Trout from Auke Lake. — CutthroatAL • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Individual capture histories of Cutthroat Trout (Oncorhynchus clarki) in Auke Lake, Alaska, from samples taken in 1998-2006.

    -
    -

    Format

    +
    +

    Format

    A data frame with 1684 observations on the following 10 variables.

    id

    Unique identification numbers for each fish

    @@ -93,16 +93,16 @@

    Format

    -
    -

    Source

    -

    From Appendix A.3 of Harding, R.D., C.L. Hoover, and R.P. Marshall. 2010. Abundance of Cutthroat Trout in Auke Lake, Southeast Alaska, in 2005 and 2006. Alaska Department of Fish and Game Fisheries Data Series No. 10-82. [Was (is?) from http://www.sf.adfg.state.ak.us/FedAidPDFs/FDS10-82.pdf.]

    +
    +

    Source

    +

    From Appendix A.3 of Harding, R.D., C.L. Hoover, and R.P. Marshall. 2010. Abundance of Cutthroat Trout in Auke Lake, Southeast Alaska, in 2005 and 2006. Alaska Department of Fish and Game Fisheries Data Series No. 10-82. [Was (is?) from http://www.sf.adfg.state.ak.us/FedAidPDFs/FDS10-82.pdf.] CSV file

    -
    -

    Note

    -

    Entered into “RMark” format (see CutthroatALf in FSAdata) and then converted to individual format with capHistConvert

    +
    +

    Note

    +

    Entered into “RMark” format (see CutthroatALf in FSAdata) and then converted to individual format with capHistConvert

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Population Size

    • @@ -112,14 +112,14 @@

      Topic(s)

    • Jolly-Seber

    • Capture History

    -
    -

    See also

    +
    +

    See also

    Used in mrOpen examples.

    -
    -

    Examples

    -
    str(CutthroatAL)
    +    
    +

    Examples

    +
    str(CutthroatAL)
     #> 'data.frame':	1684 obs. of  10 variables:
     #>  $ id   : int  1 2 3 4 5 6 7 8 9 10 ...
     #>  $ y1998: int  0 0 0 0 0 0 0 0 0 0 ...
    @@ -131,7 +131,7 @@ 

    Examples

    #> $ y2004: int 0 0 0 0 0 0 0 0 0 0 ... #> $ y2005: int 0 0 0 0 0 0 0 0 0 0 ... #> $ y2006: int 1 1 1 1 1 1 1 1 1 1 ... -head(CutthroatAL) +head(CutthroatAL) #> id y1998 y1999 y2000 y2001 y2002 y2003 y2004 y2005 y2006 #> 1 1 0 0 0 0 0 0 0 0 1 #> 2 2 0 0 0 0 0 0 0 0 1 @@ -139,29 +139,25 @@

    Examples

    #> 4 4 0 0 0 0 0 0 0 0 1 #> 5 5 0 0 0 0 0 0 0 0 1 #> 6 6 0 0 0 0 0 0 0 0 1 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/Ecoli.html b/docs/reference/Ecoli.html index d04f523d..c027bb61 100644 --- a/docs/reference/Ecoli.html +++ b/docs/reference/Ecoli.html @@ -1,66 +1,66 @@ -Population growth of Escherichia coli. — Ecoli • FSAPopulation growth of Escherichia coli. — Ecoli • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    The number of Escherichia coli cells versus time.

    -
    -

    Format

    +
    +

    Format

    A data frame with 8 observations on the following 2 variables:

    days

    Elapsed duration of the experiment

    @@ -69,38 +69,34 @@

    Format

    -
    -

    Source

    -

    McKendrick, A.G. and M. Kesava Pai. 1911. The Rate of Multiplication of Micro-Organisms: a Mathematical Study. Proceedings of the Royal Society of Edinburgh. 31:649-655.

    +
    +

    Source

    +

    McKendrick, A.G. and M. Kesava Pai. 1911. The Rate of Multiplication of Micro-Organisms: a Mathematical Study. Proceedings of the Royal Society of Edinburgh. 31:649-655. CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Nonlinear Model

    • Other

    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/FSA-internals.html b/docs/reference/FSA-internals.html index fd0f48fa..95a345c5 100644 --- a/docs/reference/FSA-internals.html +++ b/docs/reference/FSA-internals.html @@ -1,88 +1,84 @@ -Internal functions. — .onAttach • FSAInternal functions. — .onAttach • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Internal functions that are common to several functions in FSA.

    -
    -
    .onAttach(lib, pkg, ...)
    +
    +

    Usage

    +
    .onAttach(lib, pkg, ...)
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/FSA.html b/docs/reference/FSA.html index 5b2638bb..332b6d60 100644 --- a/docs/reference/FSA.html +++ b/docs/reference/FSA.html @@ -1,97 +1,93 @@ -Fisheries stock assessment methods and data. — FSA • FSAFisheries stock assessment methods and data. — FSA • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Functions to support basic fisheries stock assessment methods.

    -
    -

    Details

    -

    Functions from this package can be used to perform a variety of basic fisheries stock assessment methods. Detailed descriptions for most functions are available in the Introductory Fisheries Analysis with R book (Ogle 2016). Vignettes for the boxed examples in the “Analysis and Interpretation of Freshwater Fisheries Data” book can be viewed with fishR("AIFFD").

    +
    +

    Details

    +

    Functions from this package can be used to perform a variety of basic fisheries stock assessment methods. Detailed descriptions for most functions are available in the Introductory Fisheries Analysis with R book (Ogle 2016) (see fishR("IFAR")). Vignettes for the boxed examples in the “Analysis and Interpretation of Freshwater Fisheries Data” book can be viewed with fishR("AIFFD").

    Questions, comments, or suggestions should be given on the GitHub FSA Issues page.

    Packages with related functionality by the same author are

    • The FSAdata package contains additional data sets.

    • The FSAsim package simulation routines for various fisheries methods.

    • The FSAWs package contains functions for developing and validating standard weight equations.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/Mirex.html b/docs/reference/Mirex.html index 40c3a5f9..7b04bec6 100644 --- a/docs/reference/Mirex.html +++ b/docs/reference/Mirex.html @@ -1,66 +1,66 @@ -Mirex concentration, weight, capture year, and species of Lake Ontario salmon. — Mirex • FSAMirex concentration, weight, capture year, and species of Lake Ontario salmon. — Mirex • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Mirex concentration, weight, capture year, and species of Lake Ontario Coho and Chinook salmon.

    -
    -

    Format

    +
    +

    Format

    A data frame with 122 observations on the following 4 variables.

    year

    a numeric vector of capture years

    @@ -75,27 +75,27 @@

    Format

    -
    -

    Source

    -

    From (actual data) Makarewicz, J.C., E.Damaske, T.W. Lewis, and M. Merner. 2003. Trend analysis reveals a recent reduction in mirex concentrations in Coho (Oncorhynchus kisutch) and Chinook (O. tshawytscha) Salmon from Lake Ontario. Environmental Science and Technology, 37:1521-1527.

    +
    +

    Source

    +

    From (actual data) Makarewicz, J.C., E.Damaske, T.W. Lewis, and M. Merner. 2003. Trend analysis reveals a recent reduction in mirex concentrations in Coho (Oncorhynchus kisutch) and Chinook (O. tshawytscha) Salmon from Lake Ontario. Environmental Science and Technology, 37:1521-1527. CSV file

    -
    -

    Details

    +
    +

    Details

    The year variable should be converted to a factor as shown in the example.

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Linear models

    • Other

    -
    -

    Examples

    -
    Mirex$year <- factor(Mirex$year)
    -lm1 <- lm(mirex~weight*year*species,data=Mirex)
    -anova(lm1)
    +    
    +

    Examples

    +
    Mirex$year <- factor(Mirex$year)
    +lm1 <- lm(mirex~weight*year*species,data=Mirex)
    +anova(lm1)
     #> Analysis of Variance Table
     #> 
     #> Response: mirex
    @@ -110,29 +110,25 @@ 

    Examples

    #> Residuals 98 0.36372 0.003711 #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/PSDlit.html b/docs/reference/PSDlit.html index c5db4e35..1a502f25 100644 --- a/docs/reference/PSDlit.html +++ b/docs/reference/PSDlit.html @@ -1,66 +1,66 @@ -Gabelhouse five-cell length categories for various species. — PSDlit • FSAGabelhouse five-cell length categories for various species. — PSDlit • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Cutoffs for the Gabelhouse five-cell length categories for a variety of species.

    -
    -

    Format

    +
    +

    Format

    A data frame of 58 observations on the following 11 variables:

    species

    Species name.

    @@ -99,12 +99,12 @@

    Format

    -
    -

    Source

    +
    +

    Source

    Original summary table from Dr. Michael Hansen, University of Wisconsin-Stevens Point. Additional species have been added by the package author from the literature.

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Size structure

    • @@ -112,22 +112,22 @@

      Topic(s)

    • Relative stock density

    • Proportional stock density

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See psdVal, psdCalc, psdPlot, psdAdd, and tictactoe for related functionality.

    -
    -

    Examples

    -
    str(PSDlit)
    +    
    +

    Examples

    +
    str(PSDlit)
     #> 'data.frame':	68 obs. of  12 variables:
     #>  $ species     : chr  "Arctic Grayling" "Bighead Carp" "Bigmouth Buffalo" "Black Bullhead" ...
     #>  $ stock.in    : num  8 11.8 11 6 15.8 ...
    @@ -141,7 +141,7 @@ 

    Examples

    #> $ memorable.cm: int 50 89 76 39 118 30 89 25 NA NA ... #> $ trophy.cm : num 55 111 94 46 148 38 114 30 NA NA ... #> $ source : chr "Hyatt (2000)" "Phelps and Willis (2013)" "Bister et al. (2000)" "Gabelhouse (1984a)" ... -head(PSDlit) +head(PSDlit) #> species stock.in quality.in preferred.in memorable.in trophy.in #> 1 Arctic Grayling 8.00 12.00 16.00 20.0 22.00 #> 2 Bighead Carp 11.75 21.25 26.75 35.0 43.75 @@ -163,29 +163,25 @@

    Examples

    #> 4 Gabelhouse (1984a) #> 5 Phelps and Willis (2013) #> 6 Gabelhouse (1984a) - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/PikeNY.html b/docs/reference/PikeNY.html index 8ebc7ea1..5199d543 100644 --- a/docs/reference/PikeNY.html +++ b/docs/reference/PikeNY.html @@ -1,66 +1,66 @@ -Summarized multiple mark-recapture data for all Northern Pike from Buckhorn Marsh, NY. — PikeNY • FSASummarized multiple mark-recapture data for all Northern Pike from Buckhorn Marsh, NY. — PikeNY • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Summary results of capture histories (number captured, number of recaptured fish, and number of unmarked fish that were marked) for all Buckhorn Marsh Northern Pike (Esox lucius).

    -
    -

    Format

    +
    +

    Format

    A data frame with 21 observations on the following 4 variables:

    date

    Capture date

    @@ -75,12 +75,12 @@

    Format

    -
    -

    Source

    -

    New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216).

    +
    +

    Source

    +

    New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Population Size

    • @@ -90,20 +90,20 @@

      Topic(s)

    • Schnabel

    • Schumacher-Eschmeyer

    -
    -

    See also

    +
    +

    See also

    Used in mrClosed examples. Also see PikeNYPartial1.

    -
    -

    Examples

    -
    str(PikeNY)
    +    
    +

    Examples

    +
    str(PikeNY)
     #> 'data.frame':	21 obs. of  4 variables:
     #>  $ date: Factor w/ 21 levels "1-Apr","1-May",..: 15 16 19 20 1 12 17 21 3 4 ...
     #>  $ n   : int  2 3 2 3 20 18 14 9 17 6 ...
     #>  $ m   : int  0 0 0 0 2 3 4 4 14 5 ...
     #>  $ R   : int  2 3 2 3 20 18 13 9 17 6 ...
    -head(PikeNY)
    +head(PikeNY)
     #>     date  n m  R
     #> 1 28-Mar  2 0  2
     #> 2 29-Mar  3 0  3
    @@ -111,29 +111,25 @@ 

    Examples

    #> 4 31-Mar 3 0 3 #> 5 1-Apr 20 2 20 #> 6 2-Apr 18 3 18 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/PikeNYPartial1.html b/docs/reference/PikeNYPartial1.html index 8778cc4d..e93e4fa6 100644 --- a/docs/reference/PikeNYPartial1.html +++ b/docs/reference/PikeNYPartial1.html @@ -1,66 +1,66 @@ -Capture histories (4 samples), in capture history format, of a subset of Northern Pike from Buckhorn Marsh, NY. — PikeNYPartial1 • FSACapture histories (4 samples), in capture history format, of a subset of Northern Pike from Buckhorn Marsh, NY. — PikeNYPartial1 • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Each line consists of the capture history over four samples of Northern Pike (Esox lucius) in Buckhorn Marsh. This file contains the capture histories for only those pike captured from April 1-4.

    -
    -

    Format

    +
    +

    Format

    A data frame with 57 observations on the following 4 variables.

    id

    A unique identification numbers

    @@ -78,12 +78,12 @@

    Format

    -
    -

    Source

    -

    Summary values taken from Table C-1 of New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216).

    +
    +

    Source

    +

    Summary values taken from Table C-1 of New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Population Size

    • @@ -94,21 +94,21 @@

      Topic(s)

    • Schumacher-Eschmeyer

    • Capture History

    -
    -

    See also

    +
    +

    See also

    Used in capHistSum and mrClosed examples. Also see PikeNY.

    -
    -

    Examples

    -
    str(PikeNYPartial1)
    +    
    +

    Examples

    +
    str(PikeNYPartial1)
     #> 'data.frame':	57 obs. of  5 variables:
     #>  $ id    : int  2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 ...
     #>  $ first : int  1 1 1 1 1 1 1 1 1 1 ...
     #>  $ second: int  0 0 0 0 0 0 0 0 0 0 ...
     #>  $ third : int  0 0 0 0 0 0 0 0 0 0 ...
     #>  $ fourth: int  0 0 0 0 0 0 0 0 0 0 ...
    -head(PikeNYPartial1)
    +head(PikeNYPartial1)
     #>     id first second third fourth
     #> 1 2001     1      0     0      0
     #> 2 2002     1      0     0      0
    @@ -116,29 +116,25 @@ 

    Examples

    #> 4 2004 1 0 0 0 #> 5 2005 1 0 0 0 #> 6 2006 1 0 0 0 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/SMBassLS.html b/docs/reference/SMBassLS.html index 327e77c3..a4d85259 100644 --- a/docs/reference/SMBassLS.html +++ b/docs/reference/SMBassLS.html @@ -1,66 +1,66 @@ -Catch-effort data for Little Silver Lake (Ont) Smallmouth Bass. — SMBassLS • FSACatch-effort data for Little Silver Lake (Ont) Smallmouth Bass. — SMBassLS • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Catch-effort data for Smallmouth Bass (Micropterus dolomieu) in Little Silver Lake, Ont.

    -
    -

    Format

    +
    +

    Format

    A data frame with 10 observations on the following 3 variables:

    day

    Day of the catch

    @@ -72,12 +72,12 @@

    Format

    -
    -

    Source

    -

    From Omand, D.N. 1951. A study of populations of fish based on catch-effort statistics. Journal of Wildlife Management, 15:88-98.

    +
    +

    Source

    +

    From Omand, D.N. 1951. A study of populations of fish based on catch-effort statistics. Journal of Wildlife Management, 15:88-98. CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Population size

    • @@ -87,19 +87,19 @@

      Topic(s)

    • DeLury method

    • Catchability

    -
    -

    See also

    +
    +

    See also

    Used in depletion examples.

    -
    -

    Examples

    -
    str(SMBassLS)
    +    
    +

    Examples

    +
    str(SMBassLS)
     #> 'data.frame':	10 obs. of  3 variables:
     #>  $ day   : int  1 2 3 4 5 6 7 8 9 10
     #>  $ catch : int  131 69 99 78 56 76 49 42 63 47
     #>  $ effort: int  7 7 7 7 7 7 7 7 7 7
    -head(SMBassLS)
    +head(SMBassLS)
     #>   day catch effort
     #> 1   1   131      7
     #> 2   2    69      7
    @@ -107,29 +107,25 @@ 

    Examples

    #> 4 4 78 7 #> 5 5 56 7 #> 6 6 76 7 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/SMBassWB.html b/docs/reference/SMBassWB.html index a723b280..9b2f8c4d 100644 --- a/docs/reference/SMBassWB.html +++ b/docs/reference/SMBassWB.html @@ -1,66 +1,66 @@ -Growth increment data for West Bearskin Lake, MN, Smallmouth Bass. — SMBassWB • FSAGrowth increment data for West Bearskin Lake, MN, Smallmouth Bass. — SMBassWB • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Growth data from Smallmouth Bass (Micropterus dolomieu) captured in West Bearskin Lake, MN. Five samples were collected over three years (1988-1990) with two gears (fall -- trapnets, spring -- electrofishing).

    -
    -

    Format

    +
    +

    Format

    A data frame of 445 observations on the following 20 variables:

    species

    Species of the fish (SMB for each fish in this file)

    @@ -123,30 +123,30 @@

    Format

    -
    -

    Source

    -

    Data from the linear growth modeling software distributed in support of Weisberg, S. 1993. Using hard-part increment data to estimate age and environmental effects. Canadian Journal of Fisheries and Aquatic Sciences 50:1229-1237.

    +
    +

    Source

    +

    Data from the linear growth modeling software distributed in support of Weisberg, S. 1993. Using hard-part increment data to estimate age and environmental effects. Canadian Journal of Fisheries and Aquatic Sciences 50:1229-1237. CSV file

    -
    -

    Note

    +
    +

    Note

    Data are in one-fish-per-line format.

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Growth increment analysis

    • Weisberg linear growth model

    • Back-Calculation

    -
    -

    See also

    +
    +

    See also

    Used in capHistSum and mrClosed examples. Also see wblake from alr4 for the same dataset with only the agecap, lencap, and radcap variables.

    -
    -

    Examples

    -
    str(SMBassWB)
    +    
    +

    Examples

    +
    str(SMBassWB)
     #> 'data.frame':	445 obs. of  20 variables:
     #>  $ species: Factor w/ 1 level "SMB": 1 1 1 1 1 1 1 1 1 1 ...
     #>  $ lake   : Factor w/ 1 level "WB": 1 1 1 1 1 1 1 1 1 1 ...
    @@ -168,7 +168,7 @@ 

    Examples

    #> $ anu11 : num NA NA NA NA NA NA NA NA NA NA ... #> $ anu12 : num NA NA NA NA NA NA NA NA NA NA ... #> $ radcap : num 1.91 1.88 1.1 1.33 1.59 ... -head(SMBassWB) +head(SMBassWB) #> species lake gear yearcap fish agecap lencap anu1 anu2 anu3 anu4 anu5 anu6 #> 1 SMB WB E 1988 5 1 71 1.90606 NA NA NA NA NA #> 2 SMB WB E 1988 3 1 64 1.87707 NA NA NA NA NA @@ -183,29 +183,25 @@

    Examples

    #> 4 NA NA NA NA NA NA 1.33108 #> 5 NA NA NA NA NA NA 1.59283 #> 6 NA NA NA NA NA NA 1.91602 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/Schnute.html b/docs/reference/Schnute.html index 000b22bd..b41d8c45 100644 --- a/docs/reference/Schnute.html +++ b/docs/reference/Schnute.html @@ -1,120 +1,138 @@ -The four-parameter growth function from Schnute (1981). — Schnute • FSAThe four-parameter growth function from Schnute (1981). — Schnute • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    The four-parameter growth function from Schnute (1981).

    -
    -
    Schnute(
    -  t,
    -  case = 1,
    -  t1 = NULL,
    -  t3 = NULL,
    -  L1 = NULL,
    -  L3 = NULL,
    -  a = NULL,
    -  b = NULL
    -)
    +
    +

    Usage

    +
    Schnute(
    +  t,
    +  case = 1,
    +  t1 = NULL,
    +  t3 = NULL,
    +  L1 = NULL,
    +  L3 = NULL,
    +  a = NULL,
    +  b = NULL
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    t

    A numeric vector of ages over which to model growth.

    + +
    case

    A string that indicates the case of the Schnute growth function to use.

    + +
    t1

    The (young) age that corresponds to L1. Set to minimum value in t by default.

    + +
    t3

    The (old) age that corresponds to L3. Set to maximum value in t by default.

    + +
    L1

    The mean size/length at t1.

    + +
    L3

    The mean size/length at t3.

    + +
    a

    A dimensionless parameter that is related to the time/age at the inflection point.

    + +
    b

    A dimensionless parameter that is related to size/length at the inflection point.

    +
    -
    -

    Value

    -

    Schnute returns a predicted size given the case of the function and the provided parameter values.

    +
    +

    Value

    + + +

    Schnute returns a predicted size given the case of the function and the provided parameter values.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    None specifically, but 12-Individual Growth is related.

    -
    -

    References

    +
    +

    References

    Schnute, J. 1981. A versatile growth model with statistical stable parameters. Canadian Journal of Fisheries and Aquatic Sciences 38:1128-1140.

    -
    -

    See also

    +
    +

    See also

    See vbFuns, GompertzFuns, RichardsFuns, logisticFuns, and SchnuteRichards for similar functionality for other models.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## See the formulae
    -growthFunShow("Schnute",1,plot=TRUE)
    +    
    +

    Examples

    +
    ## See the formulae
    +growthFunShow("Schnute",1,plot=TRUE)
     
     #> expression(E(L[t]) == bgroup("[", L[1]^{
     #>     b
    @@ -129,7 +147,7 @@ 

    Examples

    #> }), "]")^{ #> ~frac(1, b) #> }) -growthFunShow("Schnute",2,plot=TRUE) +growthFunShow("Schnute",2,plot=TRUE) #> expression(E(L[t]) == L[1] * e^{ #> log ~ bgroup("(", frac(L[3], L[1]), ")") * ~frac(1 - e^{ @@ -138,7 +156,7 @@

    Examples

    #> -a * (~t[3] ~ -~t[1]) #> }) #> }) -growthFunShow("Schnute",3,plot=TRUE) +growthFunShow("Schnute",3,plot=TRUE) #> expression(E(L[t]) == bgroup("[", L[1]^{ #> b @@ -149,48 +167,44 @@

    Examples

    #> }) * ~frac(~t ~ -~t[1], ~t[3] ~ -~t[1]), "]")^{ #> ~frac(1, b) #> }) -growthFunShow("Schnute",4,plot=TRUE) +growthFunShow("Schnute",4,plot=TRUE) #> expression(E(L[t]) == L[1] * e^{ #> log ~ bgroup("(", frac(L[3], L[1]), ")") * ~frac(~t ~ -~t[1], #> ~t[3] ~ -~t[1]) #> }) - -## Simple examples -ages <- 1:15 -s1 <- Schnute(ages,case=1,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) -s2 <- Schnute(ages,case=2,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) -s3 <- Schnute(ages,case=3,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) -s4 <- Schnute(ages,case=4,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) - -plot(s1~ages,type="l",lwd=2) -lines(s2~ages,lwd=2,col="red") -lines(s3~ages,lwd=2,col="blue") -lines(s4~ages,lwd=2,col="green") + +## Simple examples +ages <- 1:15 +s1 <- Schnute(ages,case=1,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) +s2 <- Schnute(ages,case=2,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) +s3 <- Schnute(ages,case=3,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) +s4 <- Schnute(ages,case=4,t1=1,t3=15,L1=30,L3=400,a=0.3,b=1) + +plot(s1~ages,type="l",lwd=2) +lines(s2~ages,lwd=2,col="red") +lines(s3~ages,lwd=2,col="blue") +lines(s4~ages,lwd=2,col="green") - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/SpotVA1.html b/docs/reference/SpotVA1.html index 7185cee0..0a8b8f32 100644 --- a/docs/reference/SpotVA1.html +++ b/docs/reference/SpotVA1.html @@ -1,66 +1,66 @@ -Age and length of spot. — SpotVA1 • FSAAge and length of spot. — SpotVA1 • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Ages (from otoliths) and lengths of Virginia Spot (Leiostomus xanthurus).

    -
    -

    Format

    +
    +

    Format

    A data frame of 403 observations on the following 2 variables:

    tl

    Measured total lengths (in inches)

    @@ -69,33 +69,33 @@

    Format

    -
    -

    Source

    -

    Extracted from Table 1 in Chapter 8 (Spot) of the VMRC Final Report on Finfish Ageing, 2002 by the Center for Quantitative Fisheries Ecology at Old Dominion University.

    +
    +

    Source

    +

    Extracted from Table 1 in Chapter 8 (Spot) of the VMRC Final Report on Finfish Ageing, 2002 by the Center for Quantitative Fisheries Ecology at Old Dominion University. CSV file

    -
    -

    Details

    +
    +

    Details

    Final length measurements were simulated by adding a uniform error to the value at the beginning of the length category.

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Growth

    • von Bertalanffy

    -
    -

    See also

    -

    Used in vbFuns, vbStarts, and nlsTracePlot examples. Also see SpotVA2 in FSAdata for related data.

    +
    +

    See also

    +

    Used in vbFuns, vbStarts, and nlsTracePlot examples. Also see SpotVA2 in FSAdata for related data.

    -
    -

    Examples

    -
    str(SpotVA1)
    +    
    +

    Examples

    +
    str(SpotVA1)
     #> 'data.frame':	403 obs. of  2 variables:
     #>  $ tl : num  6.5 6.3 7.4 7.1 7.7 7.1 7.9 7.3 7.5 7.3 ...
     #>  $ age: int  0 0 0 0 0 0 0 0 0 0 ...
    -head(SpotVA1)
    +head(SpotVA1)
     #>    tl age
     #> 1 6.5   0
     #> 2 6.3   0
    @@ -103,31 +103,27 @@ 

    Examples

    #> 4 7.1 0 #> 5 7.7 0 #> 6 7.1 0 -plot(tl~age,data=SpotVA1) +plot(tl~age,data=SpotVA1) - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/Summarize.html b/docs/reference/Summarize.html index d1483044..3eb6a765 100644 --- a/docs/reference/Summarize.html +++ b/docs/reference/Summarize.html @@ -1,200 +1,218 @@ -Summary statistics for a numeric variable. — Summarize • FSASummary statistics for a numeric variable. — Summarize • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Summary statistics for a single numeric variable, possibly separated by the levels of a factor variable or variables. This function is very similar to summary for a numeric variable.

    -
    -
    Summarize(object, ...)
    -
    -# S3 method for default
    -Summarize(
    -  object,
    -  digits = getOption("digits"),
    -  na.rm = TRUE,
    -  exclude = NULL,
    -  nvalid = c("different", "always", "never"),
    -  percZero = c("different", "always", "never"),
    -  ...
    -)
    -
    -# S3 method for formula
    -Summarize(
    -  object,
    -  data = NULL,
    -  digits = getOption("digits"),
    -  na.rm = TRUE,
    -  exclude = NULL,
    -  nvalid = c("different", "always", "never"),
    -  percZero = c("different", "always", "never"),
    -  ...
    -)
    +
    +

    Usage

    +
    Summarize(object, ...)
    +
    +# S3 method for default
    +Summarize(
    +  object,
    +  digits = getOption("digits"),
    +  na.rm = TRUE,
    +  exclude = NULL,
    +  nvalid = c("different", "always", "never"),
    +  percZero = c("different", "always", "never"),
    +  ...
    +)
    +
    +# S3 method for formula
    +Summarize(
    +  object,
    +  data = NULL,
    +  digits = getOption("digits"),
    +  na.rm = TRUE,
    +  exclude = NULL,
    +  nvalid = c("different", "always", "never"),
    +  percZero = c("different", "always", "never"),
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    A vector of numeric data.

    + +
    ...

    Not implemented.

    + +
    digits

    A single numeric that indicates the number of decimals to round the numeric summaries.

    + +
    na.rm

    A logical that indicates whether numeric missing values (NA) should be removed (=TRUE, default) or not.

    + +
    exclude

    A string that contains the level that should be excluded from a factor variable.

    + +
    nvalid

    A string that indicates how the “validn” result will be handled. If "always" then “validn” will always be shown and if "never" then “validn” will never be shown. However, if "different" (DEFAULT), then “validn” will only be shown if it differs from “n” (or if at least one group differs from “n” when summarized by multiple groups).

    + +
    percZero

    A string that indicates how the “percZero” result will be handled. If "always" then “percZero” will always be shown and if "never" then “percZero” will never be shown. However, if "different" (DEFAULT), then “percZero” will only be shown if it is greater than zero (or if at least one group is greater than zero when summarized by multiple groups).

    + +
    data

    A data.frame that contains the variables in formula.

    +
    -
    -

    Value

    -

    A named vector or data frame (when a quantitative variable is separated by one or two factor variables) of summary statistics for numeric data.

    +
    +

    Value

    + + +

    A named vector or data frame (when a quantitative variable is separated by one or two factor variables) of summary statistics for numeric data.

    -
    -

    Details

    +
    +

    Details

    This function is primarily used with formulas of the following types (where quant and factor generically represent quantitative/numeric and factor variables, respectively):

    FormulaDescription of Summary
    ~quantNumerical summaries (see below) of quant.
    quant~factorSummaries of quant separated by levels in factor.
    quant~factor1*factor2Summaries of quant separated by the combined levels in factor1 and factor2.

    Numerical summaries include all results from summary (min, Q1, mean, median, Q3, and max) and the sample size, valid sample size (sample size minus number of NAs), and standard deviation (i.e., sd). NA values are removed from the calculations with na.rm=TRUE (the DEFAULT). The number of digits in the returned results are controlled with digits=.

    -
    -

    Note

    +
    +

    Note

    Students often need to examine basic statistics of a quantitative variable separated for different levels of a categorical variable. These results may be obtained with tapply, by, or aggregate (or with functions in other packages), but the use of these functions is not obvious to newbie students or return results in a format that is not obvious to newbie students. Thus, the formula method to Summarize allows newbie students to use a common notation (i.e., formula) to easily compute summary statistics for a quantitative variable separated by the levels of a factor.

    -
    -

    See also

    +
    +

    See also

    See summary for related one dimensional functionality. See tapply, summaryBy in doBy, describe in psych, describe in prettyR, and basicStats in fBasics for similar “by” functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Create a data.frame of "data"
    -n <- 102
    -d <- data.frame(y=c(0,0,NA,NA,NA,runif(n-5)),
    -                w=sample(7:9,n,replace=TRUE),
    -                v=sample(0:2,n,replace=TRUE),
    -                g1=factor(sample(c("A","B","C",NA),n,replace=TRUE)),
    -                g2=factor(sample(c("male","female","UNKNOWN"),n,replace=TRUE)),
    -                g3=sample(c("a","b","c","d"),n,replace=TRUE),
    -                stringsAsFactors=FALSE)
    -
    -# typical output of summary() for a numeric variable
    -summary(d$y)   
    +    
    +

    Examples

    +
    ## Create a data.frame of "data"
    +n <- 102
    +d <- data.frame(y=c(0,0,NA,NA,NA,runif(n-5)),
    +                w=sample(7:9,n,replace=TRUE),
    +                v=sample(0:2,n,replace=TRUE),
    +                g1=factor(sample(c("A","B","C",NA),n,replace=TRUE)),
    +                g2=factor(sample(c("male","female","UNKNOWN"),n,replace=TRUE)),
    +                g3=sample(c("a","b","c","d"),n,replace=TRUE),
    +                stringsAsFactors=FALSE)
    +
    +# typical output of summary() for a numeric variable
    +summary(d$y)   
     #>    Min. 1st Qu.  Median    Mean 3rd Qu.    Max.    NA's 
     #>  0.0000  0.2667  0.5156  0.5131  0.7794  0.9975       3 
    -
    -# this function           
    -Summarize(d$y,digits=3)
    +
    +# this function           
    +Summarize(d$y,digits=3)
     #>        n   nvalid     mean       sd      min       Q1   median       Q3 
     #>  102.000   99.000    0.513    0.314    0.000    0.267    0.516    0.779 
     #>      max percZero 
     #>    0.998    2.020 
    -Summarize(~y,data=d,digits=3)
    +Summarize(~y,data=d,digits=3)
     #>        n   nvalid     mean       sd      min       Q1   median       Q3 
     #>  102.000   99.000    0.513    0.314    0.000    0.267    0.516    0.779 
     #>      max percZero 
     #>    0.998    2.020 
    -Summarize(y~1,data=d,digits=3)
    +Summarize(y~1,data=d,digits=3)
     #>        n   nvalid     mean       sd      min       Q1   median       Q3 
     #>  102.000   99.000    0.513    0.314    0.000    0.267    0.516    0.779 
     #>      max percZero 
     #>    0.998    2.020 
    -
    -# note that nvalid is not shown if there are no NAs and
    -#   percZero is not shown if there are no zeros
    -Summarize(~w,data=d,digits=3)
    +
    +# note that nvalid is not shown if there are no NAs and
    +#   percZero is not shown if there are no zeros
    +Summarize(~w,data=d,digits=3)
     #>       n    mean      sd     min      Q1  median      Q3     max 
     #> 102.000   7.804   0.809   7.000   7.000   8.000   8.000   9.000 
    -Summarize(~v,data=d,digits=3)
    +Summarize(~v,data=d,digits=3)
     #>        n     mean       sd      min       Q1   median       Q3      max 
     #>  102.000    0.980    0.856    0.000    0.000    1.000    2.000    2.000 
     #> percZero 
     #>   37.255 
    -
    -# note that the nvalid and percZero results can be forced to be shown
    -Summarize(~w,data=d,digits=3,nvalid="always",percZero="always")
    +
    +# note that the nvalid and percZero results can be forced to be shown
    +Summarize(~w,data=d,digits=3,nvalid="always",percZero="always")
     #>        n   nvalid     mean       sd      min       Q1   median       Q3 
     #>  102.000  102.000    7.804    0.809    7.000    7.000    8.000    8.000 
     #>      max percZero 
     #>    9.000    0.000 
    -
    -## Numeric vector by levels of a factor variable
    -Summarize(y~g1,data=d,digits=3)
    +
    +## Numeric vector by levels of a factor variable
    +Summarize(y~g1,data=d,digits=3)
     #>   g1  n nvalid  mean    sd   min    Q1 median    Q3   max percZero
     #> 1  A 21     21 0.513 0.337 0.000 0.254  0.535 0.854 0.974    4.762
     #> 2  B 27     25 0.530 0.323 0.000 0.259  0.528 0.808 0.963    4.000
     #> 3  C 27     26 0.516 0.286 0.023 0.302  0.441 0.754 0.998    0.000
    -Summarize(y~g2,data=d,digits=3)
    +Summarize(y~g2,data=d,digits=3)
     #>        g2  n nvalid  mean    sd   min    Q1 median    Q3   max percZero
     #> 1 UNKNOWN 35     35 0.538 0.284 0.000 0.376  0.516 0.766 0.988    2.857
     #> 2  female 37     34 0.519 0.336 0.000 0.240  0.521 0.838 0.998    2.941
     #> 3    male 30     30 0.478 0.329 0.003 0.189  0.515 0.755 0.963    0.000
    -Summarize(y~g2,data=d,digits=3,exclude="UNKNOWN")
    +Summarize(y~g2,data=d,digits=3,exclude="UNKNOWN")
     #>       g2  n nvalid  mean    sd   min    Q1 median    Q3   max percZero
     #> 1 female 37     34 0.519 0.336 0.000 0.240  0.521 0.838 0.998    2.941
     #> 2   male 30     30 0.478 0.329 0.003 0.189  0.515 0.755 0.963    0.000
    -
    -## Numeric vector by levels of two factor variables
    -Summarize(y~g1+g2,data=d,digits=3)
    +
    +## Numeric vector by levels of two factor variables
    +Summarize(y~g1+g2,data=d,digits=3)
     #>   g1      g2  n nvalid  mean    sd   min    Q1 median    Q3   max percZero
     #> 1  A UNKNOWN  6      6 0.542 0.342 0.124 0.323  0.467 0.843 0.953    0.000
     #> 2  B UNKNOWN 10     10 0.616 0.298 0.000 0.475  0.582 0.894 0.947   10.000
    @@ -205,7 +223,7 @@ 

    Examples

    #> 7 A male 6 6 0.444 0.344 0.017 0.213 0.394 0.708 0.894 0.000 #> 8 B male 9 9 0.429 0.354 0.003 0.147 0.280 0.709 0.963 0.000 #> 9 C male 8 8 0.578 0.248 0.186 0.360 0.720 0.746 0.819 0.000 -Summarize(y~g1+g2,data=d,digits=3,exclude="UNKNOWN") +Summarize(y~g1+g2,data=d,digits=3,exclude="UNKNOWN") #> g1 g2 n nvalid mean sd min Q1 median Q3 max percZero #> 1 A female 9 9 0.540 0.364 0.000 0.274 0.666 0.854 0.974 11.111 #> 2 B female 8 6 0.537 0.330 0.099 0.295 0.572 0.756 0.958 0.000 @@ -213,16 +231,16 @@

    Examples

    #> 4 A male 6 6 0.444 0.344 0.017 0.213 0.394 0.708 0.894 0.000 #> 5 B male 9 9 0.429 0.354 0.003 0.147 0.280 0.709 0.963 0.000 #> 6 C male 8 8 0.578 0.248 0.186 0.360 0.720 0.746 0.819 0.000 - -## What happens if RHS of formula is not a factor -Summarize(y~w,data=d,digits=3) + +## What happens if RHS of formula is not a factor +Summarize(y~w,data=d,digits=3) #> w n nvalid mean sd min Q1 median Q3 max percZero #> 1 7 45 44 0.585 0.323 0.000 0.320 0.707 0.879 0.974 2.273 #> 2 8 32 32 0.461 0.319 0.000 0.196 0.403 0.677 0.998 3.125 #> 3 9 25 23 0.448 0.271 0.003 0.295 0.421 0.635 0.953 0.000 - -## Summarizing multiple variables in a data.frame (must reduce to numerics) -lapply(as.list(d[,1:3]),Summarize,digits=4) + +## Summarizing multiple variables in a data.frame (must reduce to numerics) +lapply(as.list(d[,1:3]),Summarize,digits=4) #> $y #> n nvalid mean sd min Q1 median Q3 #> 102.0000 99.0000 0.5131 0.3142 0.0000 0.2667 0.5156 0.7794 @@ -239,29 +257,25 @@

    Examples

    #> percZero #> 37.2549 #> - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/WR79.html b/docs/reference/WR79.html index 406ec9fe..5627c25f 100644 --- a/docs/reference/WR79.html +++ b/docs/reference/WR79.html @@ -1,66 +1,66 @@ -Ages and lengths for a hypothetical sample from Westerheim and Ricker (1979). — WR79 • FSAAges and lengths for a hypothetical sample from Westerheim and Ricker (1979). — WR79 • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Ages and lengths for a hypothetical sample in Westerheim and Ricker (1979).

    -
    -

    Format

    +
    +

    Format

    A data frame of 2369 observations on the following 3 variables:

    ID

    Unique fish identifiers

    @@ -72,29 +72,29 @@

    Format

    -
    -

    Source

    -

    Simulated from Table 2A in Westerheim, S.J. and W.E. Ricker. 1979. Bias in using age-length key to estimate age-frequency distributions. Journal of the Fisheries Research Board of Canada. 35:184-189.

    +
    +

    Source

    +

    Simulated from Table 2A in Westerheim, S.J. and W.E. Ricker. 1979. Bias in using age-length key to estimate age-frequency distributions. Journal of the Fisheries Research Board of Canada. 35:184-189. CSV file

    -
    -

    Details

    +
    +

    Details

    Age-length data in 5-cm increments taken exactly from Table 2A of the source which was a sample from a hypothetical population in which year-class strength varied in the ratio 2:1 and the rate of increase in length decreased with age. Actual lengths in each 5-cm interval were simulated with a uniform distribution. The aged fish in this file were randomly selected and an assessed age was assigned according to the information in Table 2A.

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Age-Length Key

    -
    -

    Examples

    -
    str(WR79)
    +    
    +

    Examples

    +
    str(WR79)
     #> 'data.frame':	2369 obs. of  3 variables:
     #>  $ ID : int  1 2 3 4 5 6 7 8 9 10 ...
     #>  $ len: int  37 37 39 37 37 35 42 42 42 44 ...
     #>  $ age: int  NA NA NA NA 4 4 NA NA NA NA ...
    -head(WR79)
    +head(WR79)
     #>   ID len age
     #> 1  1  37  NA
     #> 2  2  37  NA
    @@ -102,45 +102,41 @@ 

    Examples

    #> 4 4 37 NA #> 5 5 37 4 #> 6 6 35 4 - -## Extract the aged sample -WR79.aged <- subset(WR79,!is.na(age)) -str(WR79.aged) + +## Extract the aged sample +WR79.aged <- subset(WR79,!is.na(age)) +str(WR79.aged) #> 'data.frame': 203 obs. of 3 variables: #> $ ID : int 5 6 21 32 40 57 59 70 94 117 ... #> $ len: int 37 35 42 43 40 41 44 46 45 47 ... #> $ age: int 4 4 4 4 4 4 4 4 4 4 ... - -## Extract the length sample -WR79.length <- subset(WR79,is.na(age)) -str(WR79.length) + +## Extract the length sample +WR79.length <- subset(WR79,is.na(age)) +str(WR79.length) #> 'data.frame': 2166 obs. of 3 variables: #> $ ID : int 1 2 3 4 7 8 9 10 11 12 ... #> $ len: int 37 37 39 37 42 42 42 44 44 43 ... #> $ age: int NA NA NA NA NA NA NA NA NA NA ... - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/WSlit.html b/docs/reference/WSlit.html index 141536a1..6fbba84f 100644 --- a/docs/reference/WSlit.html +++ b/docs/reference/WSlit.html @@ -1,66 +1,66 @@ -All known standard weight equations. — WSlit • FSAAll known standard weight equations. — WSlit • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Parameters for all known standard weight equations.

    -
    -

    Format

    +
    +

    Format

    A data frame with observations on the following 13 variables:

    species

    Species name.

    @@ -102,39 +102,39 @@

    Format

    -
    -

    Source

    +
    +

    Source

    Most of these equations can be found in Neumann, R.M., C.S. Guy, and D.W. Willis. 2012. Length, Weight, and Associated Indices. Chapter 14 in Zale, A.V., D.L. Parrish, and T.M. Sutton, editors. Fisheries Techniques. American Fisheries Society, Bethesda, MD.

    Some species were not in Neumann et al (2012) and are noted as such in the comments variable.

    -
    -

    Details

    +
    +

    Details

    The minimum TL for the English units were derived by rounding the converted minimum TL for the metric units to what seemed like common units (inches, half inches, or quarter inches).

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Relative weight

    • Standard weight

    • Condition

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    8-Condition.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See wsVal and wrAdd for related functionality.

    -
    -

    Examples

    -
    str(WSlit)
    +    
    +

    Examples

    +
    str(WSlit)
     #> 'data.frame':	201 obs. of  13 variables:
     #>  $ species: chr  "Aegean Chub" "African Sharptooth Catfish" "Alabama Bass" "Ankara Nase" ...
     #>  $ units  : chr  "metric" "metric" "metric" "metric" ...
    @@ -149,7 +149,7 @@ 

    Examples

    #> $ quad : num 0.329 0.209 NA -0.971 NA ... #> $ source : chr "Giannetto et al. (2012)" "Emiroglu et al. (2018)" "Dicenzo et al. (1995)" "Emiroglu et al. (2020)" ... #> $ comment: chr "none" "only from Sakarya River Basin (Turkey)" "min.len not made clear (assumed same as Spotted Bass); same as Spotted Bass (Alabama subspecies)" "only from Turkey" ... -head(WSlit) +head(WSlit) #> species units type ref measure method min.len #> 1 Aegean Chub metric quadratic 75 TL EmP 70 #> 2 African Sharptooth Catfish metric quadratic 75 TL EmP 180 @@ -171,29 +171,25 @@

    Examples

    #> 4 only from Turkey #> 5 authors note that either RLP or EmP method may be used #> 6 authors note that either RLP or EmP method may be used - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/WhitefishLC.html b/docs/reference/WhitefishLC.html index dcaf8b6e..e60610ba 100644 --- a/docs/reference/WhitefishLC.html +++ b/docs/reference/WhitefishLC.html @@ -1,66 +1,66 @@ -Assigned ages from two readers on three structures for Lake Whitefish from Lake Champlain. — WhitefishLC • FSAAssigned ages from two readers on three structures for Lake Whitefish from Lake Champlain. — WhitefishLC • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Assigned ages from two readers on three structures for Lake Whitefish (Coregonus clupeaformis) from Lake Champlain in 2009.

    -
    -

    Format

    +
    +

    Format

    A data frame with 151 observations on the following 11 variables:

    fishID

    A unique fish identification number

    @@ -96,12 +96,12 @@

    Format

    -
    -

    Source

    -

    Data from Herbst, S.J. and J.E. Marsden. 2011. Comparison of precision and bias of scale, fin ray, and otolith age estimates for lake whitefish (Coregonus clupeaformis) in Lake Champlain. Journal of Great Lakes Research. 37:386-389. Contributed by Seth Herbst. Do not use for other than educational purposes without permission from the author. [Was (is?) from http://www.uvm.edu/rsenr/emarsden/documents/Herbst%20and%20Marsden%20whitefish%20age%20structure%20comparison.pdf.]

    +
    +

    Source

    +

    Data from Herbst, S.J. and J.E. Marsden. 2011. Comparison of precision and bias of scale, fin ray, and otolith age estimates for lake whitefish (Coregonus clupeaformis) in Lake Champlain. Journal of Great Lakes Research. 37:386-389. Contributed by Seth Herbst. Do not use for other than educational purposes without permission from the author. CSV file

    -
    -

    Topic(s)

    +
    +

    Topic(s)

    • Age

    • @@ -110,14 +110,14 @@

      Topic(s)

    • Bias

    • Age Comparisons

    -
    -

    See also

    +
    +

    See also

    Used in ageBias and agePrecision examples.

    -
    -

    Examples

    -
    str(WhitefishLC)
    +    
    +

    Examples

    +
    str(WhitefishLC)
     #> 'data.frame':	151 obs. of  11 variables:
     #>  $ fishID  : int  1 2 3 4 5 6 7 8 9 10 ...
     #>  $ tl      : int  345 334 348 300 330 316 508 475 340 173 ...
    @@ -130,7 +130,7 @@ 

    Examples

    #> $ otolith1: int 3 3 3 3 3 6 9 11 3 1 ... #> $ otolith2: int 3 3 3 3 3 5 10 12 4 1 ... #> $ otolithC: int 3 3 3 3 3 6 10 11 4 1 ... -head(WhitefishLC) +head(WhitefishLC) #> fishID tl scale1 scale2 scaleC finray1 finray2 finrayC otolith1 otolith2 #> 1 1 345 3 3 3 3 3 3 3 3 #> 2 2 334 4 3 4 3 3 3 3 3 @@ -145,29 +145,25 @@

    Examples

    #> 4 3 #> 5 3 #> 6 6 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/addZeroCatch.html b/docs/reference/addZeroCatch.html index 15da496c..ca2c7904 100644 --- a/docs/reference/addZeroCatch.html +++ b/docs/reference/addZeroCatch.html @@ -1,120 +1,132 @@ -Adds zeros for catches of species not collected in some sampling events. — addZeroCatch • FSAAdds zeros for catches of species not collected in some sampling events. — addZeroCatch • FSA + + Skip to contents -
    -
    -
    - +
    +
    +
    -
    +

    Adds zeros for catches of species that were not captured in a sampling event but were captured in at least one other sampling event (i.e., adds zeros to the data.frame for capture events where a species was not observed).

    -
    -
    addZeroCatch(df, eventvar, specvar, zerovar, na.rm = TRUE)
    +
    +

    Usage

    +
    addZeroCatch(df, eventvar, specvar, zerovar, na.rm = TRUE)
    -
    -

    Arguments

    +
    +

    Arguments

    df

    A data.frame that contains the capture summary data as described in the details.

    + +
    eventvar

    A string for the variable that identifies unique capture events.

    + +
    specvar

    A string or vector of strings for the variable(s) that identify the “species” (if multiple variables, could be species, sex, and life stage, for example) captured. See examples.

    + +
    zerovar

    A string or vector of strings for the variable(s) that should be set equal to zero. See details and examples.

    + +
    na.rm

    A logical that indicates if rows where specvar that are NA should be removed after adding the zeros. See details.

    +
    -
    -

    Value

    -

    A data.frame with the same structure as df but with rows of zero observation data appended.

    +
    +

    Value

    + + +

    A data.frame with the same structure as df but with rows of zero observation data appended.

    -
    -

    Details

    +
    +

    Details

    The data.frame in df must contain a column that identifies a unique capture event (given in eventvar), a column with the name for the species captured (given in specvar), and a column that contains the number of that species captured (potentially given to zerovar; see details). All sampling event and species combinations where catch information does not exist is identified and a new data.frame that contains a zero for the catch for all of these combinations is created. This new data.frame is appended to the original data.frame to construct a data.frame that contains complete catch information -- i.e., including zeros for species in events where that species was not captured.

    The data.frame may contain other information related to the catch, such as number of recaptured fish, number of fish released, etc. These additional variables can be included in zerovar so that zeros will be added to these variables as well (e.g., if the catch of the species is zero, then the number of recaptures must also be zero). All variables not given in eventvar, specvar, or zerovar will be assumed to be related to eventvar and specvar (e.g., date, gear type, and habitat) and, thus, will be repeated with these variables.

    In situations where no fish were captured in some events, the df may contain rows that have a value for eventvar but not for specvar. These rows are important because zeros need to be added for each observed species for these events. However, in these situations, a <NA> species will appear in the resulting data.frame. It is unlikely that these “missing” species are needed so they will be removed if na.rm=TRUE (DEFAULT) is used.

    One should test the results of this function by creating a frequency table of the eventvar or specvar. In either case, the table should contain the same value in each cell of the table. See the examples.

    -
    -

    Note

    +
    +

    Note

    An error will be returned if either specvar or eventvar are factors with any NA levels. This usually arises if the data.frame was subsetted/filtered prior to using addZeroCatch. See droplevels for descriptions of how to drop unused levels.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    2-Basic Data Manipulations

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    complete in tidyr package.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Example Data #1 (some nets missing some fish, ancillary net data)
    -df1 <- data.frame(net=c(1,1,1,2,2,3),
    -                  eff=c(1,1,1,1,1,1),
    -                  species=c("BKT","LKT","RBT","BKT","LKT","RBT"),
    -                  catch=c(3,4,5,5,4,3))
    -df1
    +    
    +

    Examples

    +
    ## Example Data #1 (some nets missing some fish, ancillary net data)
    +df1 <- data.frame(net=c(1,1,1,2,2,3),
    +                  eff=c(1,1,1,1,1,1),
    +                  species=c("BKT","LKT","RBT","BKT","LKT","RBT"),
    +                  catch=c(3,4,5,5,4,3))
    +df1
     #>   net eff species catch
     #> 1   1   1     BKT     3
     #> 2   1   1     LKT     4
    @@ -122,16 +134,16 @@ 

    Examples

    #> 4 2 1 BKT 5 #> 5 2 1 LKT 4 #> 6 3 1 RBT 3 -# not all 1s -xtabs(~net+species,data=df1) +# not all 1s +xtabs(~net+species,data=df1) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 0 #> 3 0 0 1 - -df1mod1 <- addZeroCatch(df1,"net","species",zerovar="catch") -df1mod1 + +df1mod1 <- addZeroCatch(df1,"net","species",zerovar="catch") +df1mod1 #> net eff species catch #> 1 1 1 BKT 3 #> 2 1 1 LKT 4 @@ -142,34 +154,34 @@

    Examples

    #> 7 3 1 BKT 0 #> 8 3 1 LKT 0 #> 9 2 1 RBT 0 -# check, should all be 3 -xtabs(~net,data=df1mod1) +# check, should all be 3 +xtabs(~net,data=df1mod1) #> net #> 1 2 3 #> 3 3 3 -# check, should all be 1 -xtabs(~net+species,data=df1mod1) +# check, should all be 1 +xtabs(~net+species,data=df1mod1) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 1 #> 3 1 1 1 -# correct mean/sd of catches -Summarize(catch~species,data=df1mod1) +# correct mean/sd of catches +Summarize(catch~species,data=df1mod1) #> species n mean sd min Q1 median Q3 max percZero #> 1 BKT 3 2.666667 2.516611 0 1.5 3 4 5 33.33333 #> 2 LKT 3 2.666667 2.309401 0 2.0 4 4 4 33.33333 #> 3 RBT 3 2.666667 2.516611 0 1.5 3 4 5 33.33333 -# incorrect mean/sd of catches (no zeros) -Summarize(catch~species,data=df1) +# incorrect mean/sd of catches (no zeros) +Summarize(catch~species,data=df1) #> species n mean sd min Q1 median Q3 max #> 1 BKT 2 4 1.414214 3 3.5 4 4.5 5 #> 2 LKT 2 4 0.000000 4 4.0 4 4.0 4 #> 3 RBT 2 4 1.414214 3 3.5 4 4.5 5 - -# Same as example 1 but with no ancillary data specific to the net number -df2 <- df1[,-2] -df2 + +# Same as example 1 but with no ancillary data specific to the net number +df2 <- df1[,-2] +df2 #> net species catch #> 1 1 BKT 3 #> 2 1 LKT 4 @@ -177,8 +189,8 @@

    Examples

    #> 4 2 BKT 5 #> 5 2 LKT 4 #> 6 3 RBT 3 -df1mod2 <- addZeroCatch(df2,"net","species",zerovar="catch") -df1mod2 +df1mod2 <- addZeroCatch(df2,"net","species",zerovar="catch") +df1mod2 #> net species catch #> 1 1 BKT 3 #> 2 1 LKT 4 @@ -189,21 +201,21 @@

    Examples

    #> 7 3 BKT 0 #> 8 3 LKT 0 #> 9 2 RBT 0 -# check, should all be 1 -xtabs(~net+species,data=df1mod2) +# check, should all be 1 +xtabs(~net+species,data=df1mod2) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 1 #> 3 1 1 1 - -## Example Data #3 (All nets have same species ... no zeros needed) -df3 <- data.frame(net=c(1,1,1,2,2,2,3,3,3), - eff=c(1,1,1,1,1,1,1,1,1), - species=c("BKT","LKT","RBT","BKT","LKT", - "RBT","BKT","LKT","RBT"), - catch=c(3,4,5,5,4,3,3,2,7)) -df3 + +## Example Data #3 (All nets have same species ... no zeros needed) +df3 <- data.frame(net=c(1,1,1,2,2,2,3,3,3), + eff=c(1,1,1,1,1,1,1,1,1), + species=c("BKT","LKT","RBT","BKT","LKT", + "RBT","BKT","LKT","RBT"), + catch=c(3,4,5,5,4,3,3,2,7)) +df3 #> net eff species catch #> 1 1 1 BKT 3 #> 2 1 1 LKT 4 @@ -214,29 +226,29 @@

    Examples

    #> 7 3 1 BKT 3 #> 8 3 1 LKT 2 #> 9 3 1 RBT 7 -# should all be 1 for this example -xtabs(~net+species,data=df3) +# should all be 1 for this example +xtabs(~net+species,data=df3) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 1 #> 3 1 1 1 - -# should receive a warning and table should still all be 1 -df3mod1 <- addZeroCatch(df3,"net","species",zerovar="catch") + +# should receive a warning and table should still all be 1 +df3mod1 <- addZeroCatch(df3,"net","species",zerovar="catch") #> Warning: All 'eventvar' have all species in 'specvar'; thus, there are no #> zeros to add. The original data.frame was returned. -xtabs(~net+species,data=df3mod1) +xtabs(~net+species,data=df3mod1) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 1 #> 3 1 1 1 - -## Example Data #4 (another variable that needs zeros) -df4 <- df1 -df4$recaps <- c(0,0,0,1,2,1) -df4 + +## Example Data #4 (another variable that needs zeros) +df4 <- df1 +df4$recaps <- c(0,0,0,1,2,1) +df4 #> net eff species catch recaps #> 1 1 1 BKT 3 0 #> 2 1 1 LKT 4 0 @@ -244,17 +256,17 @@

    Examples

    #> 4 2 1 BKT 5 1 #> 5 2 1 LKT 4 2 #> 6 3 1 RBT 3 1 -# not all 1s -xtabs(~net+species,data=df4) +# not all 1s +xtabs(~net+species,data=df4) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 0 #> 3 0 0 1 - -df4mod1 <- addZeroCatch(df4,"net","species",zerovar=c("catch","recaps")) -# note zeros in both variables -df4mod1 + +df4mod1 <- addZeroCatch(df4,"net","species",zerovar=c("catch","recaps")) +# note zeros in both variables +df4mod1 #> net eff species catch recaps #> 1 1 1 BKT 3 0 #> 2 1 1 LKT 4 0 @@ -265,40 +277,40 @@

    Examples

    #> 7 3 1 BKT 0 0 #> 8 3 1 LKT 0 0 #> 9 2 1 RBT 0 0 -# check, should all be 1 -xtabs(~net+species,data=df4mod1) +# check, should all be 1 +xtabs(~net+species,data=df4mod1) #> species #> net BKT LKT RBT #> 1 1 1 1 #> 2 1 1 1 #> 3 1 1 1 -# observe difference from next -Summarize(catch~species,data=df4) +# observe difference from next +Summarize(catch~species,data=df4) #> species n mean sd min Q1 median Q3 max #> 1 BKT 2 4 1.414214 3 3.5 4 4.5 5 #> 2 LKT 2 4 0.000000 4 4.0 4 4.0 4 #> 3 RBT 2 4 1.414214 3 3.5 4 4.5 5 -Summarize(catch~species,data=df4mod1) +Summarize(catch~species,data=df4mod1) #> species n mean sd min Q1 median Q3 max percZero #> 1 BKT 3 2.666667 2.516611 0 1.5 3 4 5 33.33333 #> 2 LKT 3 2.666667 2.309401 0 2.0 4 4 4 33.33333 #> 3 RBT 3 2.666667 2.516611 0 1.5 3 4 5 33.33333 -# observe difference from next -Summarize(recaps~species,data=df4) +# observe difference from next +Summarize(recaps~species,data=df4) #> species n mean sd min Q1 median Q3 max percZero #> 1 BKT 2 0.5 0.7071068 0 0.25 0.5 0.75 1 50 #> 2 LKT 2 1.0 1.4142136 0 0.50 1.0 1.50 2 50 #> 3 RBT 2 0.5 0.7071068 0 0.25 0.5 0.75 1 50 -Summarize(recaps~species,data=df4mod1) +Summarize(recaps~species,data=df4mod1) #> species n mean sd min Q1 median Q3 max percZero #> 1 BKT 3 0.3333333 0.5773503 0 0 0 0.5 1 66.66667 #> 2 LKT 3 0.6666667 1.1547005 0 0 0 1.0 2 66.66667 #> 3 RBT 3 0.3333333 0.5773503 0 0 0 0.5 1 66.66667 - -## Example Data #5 (two "specvar"s) -df5 <- df1 -df5$sex <- c("m","m","f","m","f","f") -df5 + +## Example Data #5 (two "specvar"s) +df5 <- df1 +df5$sex <- c("m","m","f","m","f","f") +df5 #> net eff species catch sex #> 1 1 1 BKT 3 m #> 2 1 1 LKT 4 m @@ -306,8 +318,8 @@

    Examples

    #> 4 2 1 BKT 5 m #> 5 2 1 LKT 4 f #> 6 3 1 RBT 3 f -# not all 1s -xtabs(~sex+species+net,data=df5) +# not all 1s +xtabs(~sex+species+net,data=df5) #> , , net = 1 #> #> species @@ -329,9 +341,9 @@

    Examples

    #> f 0 0 1 #> m 0 0 0 #> - -df5mod1 <- addZeroCatch(df5,"net",c("species","sex"),zerovar="catch") -df5mod1 + +df5mod1 <- addZeroCatch(df5,"net",c("species","sex"),zerovar="catch") +df5mod1 #> net eff species catch sex #> 1 1 1 BKT 3 m #> 2 1 1 LKT 4 m @@ -351,8 +363,8 @@

    Examples

    #> 16 1 1 RBT 0 m #> 17 2 1 RBT 0 m #> 18 3 1 RBT 0 m -# all 1s -xtabs(~sex+species+net,data=df5mod1) +# all 1s +xtabs(~sex+species+net,data=df5mod1) #> , , net = 1 #> #> species @@ -374,18 +386,18 @@

    Examples

    #> f 1 1 1 #> m 1 1 1 #> -str(df5mod1) +str(df5mod1) #> 'data.frame': 18 obs. of 5 variables: #> $ net : num 1 1 1 2 2 3 1 2 3 1 ... #> $ eff : num 1 1 1 1 1 1 1 1 1 1 ... #> $ species: chr "BKT" "LKT" "RBT" "BKT" ... #> $ catch : num 3 4 5 5 4 3 0 0 0 0 ... #> $ sex : chr "m" "m" "f" "m" ... - -## Example Data #6 (three "specvar"s) -df6 <- df5 -df6$size <- c("lrg","lrg","lrg","sm","lrg","sm") -df6 + +## Example Data #6 (three "specvar"s) +df6 <- df5 +df6$size <- c("lrg","lrg","lrg","sm","lrg","sm") +df6 #> net eff species catch sex size #> 1 1 1 BKT 3 m lrg #> 2 1 1 LKT 4 m lrg @@ -393,9 +405,9 @@

    Examples

    #> 4 2 1 BKT 5 m sm #> 5 2 1 LKT 4 f lrg #> 6 3 1 RBT 3 f sm - -df6mod1 <- addZeroCatch(df6,"net",c("species","sex","size"),zerovar="catch") -df6mod1 + +df6mod1 <- addZeroCatch(df6,"net",c("species","sex","size"),zerovar="catch") +df6mod1 #> net eff species catch sex size #> 1 1 1 BKT 3 m lrg #> 2 1 1 LKT 4 m lrg @@ -433,29 +445,25 @@

    Examples

    #> 34 1 1 RBT 0 m sm #> 35 2 1 RBT 0 m sm #> 36 3 1 RBT 0 m sm - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/ageBias.html b/docs/reference/ageBias.html index 24b23666..bccc205d 100644 --- a/docs/reference/ageBias.html +++ b/docs/reference/ageBias.html @@ -1,223 +1,315 @@ -Compute and view possible differences between paired sets of ages. — ageBias • FSACompute and view possible differences between paired sets of ages. — ageBias • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Constructs age-agreement tables, statistical tests to detect differences, and plots to visualize potential differences in paired age estimates. Ages may be from, for example, two readers of the same structure, one reader at two times, two structures (e.g., scales, spines, otoliths), or one structure and known ages.

    -
    -
    ageBias(
    -  formula,
    -  data,
    -  ref.lab = tmp$Enames,
    -  nref.lab = tmp$Rname,
    -  method = stats::p.adjust.methods,
    -  sig.level = 0.05,
    -  min.n.CI = 3
    -)
    -
    -# S3 method for ageBias
    -summary(
    -  object,
    -  what = c("table", "symmetry", "Bowker", "EvansHoenig", "McNemar", "bias",
    -    "diff.bias", "n"),
    -  flip.table = FALSE,
    -  zero.print = "-",
    -  digits = 3,
    -  cont.corr = c("none", "Yates", "Edwards"),
    -  ...
    -)
    -
    -# S3 method for ageBias
    -plot(
    -  x,
    -  xvals = c("reference", "mean"),
    -  xlab = ifelse(xvals == "reference", x$ref.lab, "Mean Age"),
    -  ylab = paste(x$nref.lab, "-", x$ref.lab),
    -  xlim = NULL,
    -  ylim = NULL,
    -  yaxt = graphics::par("yaxt"),
    -  xaxt = graphics::par("xaxt"),
    -  col.agree = "gray60",
    -  lwd.agree = lwd,
    -  lty.agree = 2,
    -  lwd = 1,
    -  sfrac = 0,
    -  show.pts = NULL,
    -  pch.pts = 20,
    -  cex.pts = ifelse(xHist | yHist, 1.5, 1),
    -  col.pts = "black",
    -  transparency = 1/10,
    -  show.CI = FALSE,
    -  col.CI = "black",
    -  col.CIsig = "red",
    -  lwd.CI = lwd,
    -  sfrac.CI = sfrac,
    -  show.range = NULL,
    -  col.range = ifelse(show.CI, "gray70", "black"),
    -  lwd.range = lwd,
    -  sfrac.range = sfrac,
    -  pch.mean = 19,
    -  pch.mean.sig = ifelse(show.CI | show.range, 21, 19),
    -  cex.mean = lwd,
    -  yHist = TRUE,
    -  xHist = NULL,
    -  hist.panel.size = 1/7,
    -  col.hist = "gray90",
    -  allowAdd = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    ageBias(
    +  formula,
    +  data,
    +  ref.lab = tmp$Enames,
    +  nref.lab = tmp$Rname,
    +  method = stats::p.adjust.methods,
    +  sig.level = 0.05,
    +  min.n.CI = 3
    +)
    +
    +# S3 method for ageBias
    +summary(
    +  object,
    +  what = c("table", "symmetry", "Bowker", "EvansHoenig", "McNemar", "bias", "diff.bias",
    +    "n"),
    +  flip.table = FALSE,
    +  zero.print = "-",
    +  digits = 3,
    +  cont.corr = c("none", "Yates", "Edwards"),
    +  ...
    +)
    +
    +# S3 method for ageBias
    +plot(
    +  x,
    +  xvals = c("reference", "mean"),
    +  xlab = ifelse(xvals == "reference", x$ref.lab, "Mean Age"),
    +  ylab = paste(x$nref.lab, "-", x$ref.lab),
    +  xlim = NULL,
    +  ylim = NULL,
    +  yaxt = graphics::par("yaxt"),
    +  xaxt = graphics::par("xaxt"),
    +  col.agree = "gray60",
    +  lwd.agree = lwd,
    +  lty.agree = 2,
    +  lwd = 1,
    +  sfrac = 0,
    +  show.pts = NULL,
    +  pch.pts = 20,
    +  cex.pts = ifelse(xHist | yHist, 1.5, 1),
    +  col.pts = "black",
    +  transparency = 1/10,
    +  show.CI = FALSE,
    +  col.CI = "black",
    +  col.CIsig = "red",
    +  lwd.CI = lwd,
    +  sfrac.CI = sfrac,
    +  show.range = NULL,
    +  col.range = ifelse(show.CI, "gray70", "black"),
    +  lwd.range = lwd,
    +  sfrac.range = sfrac,
    +  pch.mean = 19,
    +  pch.mean.sig = ifelse(show.CI | show.range, 21, 19),
    +  cex.mean = lwd,
    +  yHist = TRUE,
    +  xHist = NULL,
    +  hist.panel.size = 1/7,
    +  col.hist = "gray90",
    +  allowAdd = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula of the form nrefvar~refvar, where nrefvar and refvar generically represent variables that contain the paired “nonreference” and “reference” age estimates, respectively. See details.

    + +
    data

    A data.frame that minimally contains the paired age estimates given in formula.

    + +
    ref.lab

    A string label for the reference age estimates.

    + +
    nref.lab

    A string label for the nonreference age estimates.

    + +
    method

    A string for which method to use to adjust p-values for multiple comparisons. See ?p.adjust.methods.

    + +
    sig.level

    A numeric value to determine whether a p-value indicates a significant result. The confidence level used in plot is 100*(1-sig.level).

    + +
    min.n.CI

    A numeric value (default is 3) that is the smallest sample size for which a confidence interval will be computed.

    + +
    what

    A string that indicates what type of summary to print or plot to construct. See details.

    + +
    flip.table

    A logical that indicates whether the age-agreement table should be ‘flipped’ (i.e., rows are reversed so that younger ages are at the bottom of the table). This makes the table more directly comparable to the age bias plot.

    + +
    zero.print

    A string for what should be printed in place of the zeros on an age-agreement table. The default is to print a single dash.

    + +
    digits

    A numeric for the minimum number of digits to print when showing what="bias" or what="diff.bias" in summary.

    + +
    cont.corr

    A string that indicates the continuity correction method to be used with (only) McNemar test. If "none" (default) then no continuity correction is used, if "Yates" then 0.5 is used, and if "Edwards" then 1 is used.

    + +
    ...

    Additional arguments for methods.

    + +
    x, object

    An object of class ageBias, usually a result from ageBias.

    + +
    xvals

    A string for whether the x-axis values are reference ages or mean of the reference and nonreference ages.

    + +
    xlab, ylab

    A string label for the x-axis (reference) or y-axis (non-reference) age estimates, respectively.

    + +
    xlim, ylim

    A numeric vector of length 2 that contains the limits of the x-axis (reference ages) or y-axis (non-reference ages), respectively.

    + +
    xaxt, yaxt

    A string which specifies the x- and y-axis types. Specifying “n” suppresses plotting of the axis. See ?par.

    + +
    col.agree

    A string or numeric for the color of the 1:1 or zero (if difference=TRUE) reference line.

    + +
    lwd.agree

    A numeric for the line width of the 1:1 or zero (if difference=TRUE) reference line.

    + +
    lty.agree

    A numeric for the line type of the 1:1 or zero (if difference=TRUE) reference line.

    + +
    lwd

    A numeric that controls the separate ‘lwd’ argument (e.g., lwd.CI and lwd.range).

    + +
    sfrac

    A numeric that controls the separate ‘sfrac’ arguments (e.g., sfrac.CI and sfrac.range). See sfrac in plotCI of plotrix.

    + +
    show.pts

    A logical for whether or not the raw data points are plotted.

    + +
    pch.pts

    A numeric for the plotting character of the raw data points.

    + +
    cex.pts

    A character expansion value for the size of the symbols for pch.pts.

    + +
    col.pts

    A string or numeric for the color of the raw data points. The default is to use black with the transparency found in transparency.

    + +
    transparency

    A numeric (between 0 and 1) for the level of transparency of the raw data points. This number of points plotted on top of each other will represent the color in col.pts.

    + +
    show.CI

    A logical for whether confidence intervals should be plotted or not.

    + +
    col.CI

    A string or numeric for the color of confidence interval bars that are considered non-significant.

    + +
    col.CIsig

    A string or numeric for the color of confidence interval bars that are considered significant.

    + +
    lwd.CI

    A numeric for the line width of the confidence interval bars.

    + +
    sfrac.CI

    A numeric for the size of the ends of the confidence interval bars. See sfrac in plotCI of plotrix.

    + +
    show.range

    A logical for whether or not vertical bars that represent the range of the data points are plotted.

    + +
    col.range

    A string or numeric for the color of the range intervals.

    + +
    lwd.range

    A numeric for the line width of the range intervals.

    + +
    sfrac.range

    A numeric for the size of the ends of the range intervals. See sfrac in plotCI of plotrix.

    + +
    pch.mean

    A numeric for the plotting character used for the mean values when the means are considered insignificant.

    + +
    pch.mean.sig

    A numeric for the plotting character for the mean values when the means are considered significant.

    + +
    cex.mean

    A character expansion value for the size of the mean symbol in pch.mean and pch.mean.sig.

    + +
    yHist

    A logical for whether a histogram of the y-axis variable should be added to the right margin of the age bias plot. See details.

    + +
    xHist

    A logical for whether a histogram of the x-axis variable should be added to the top margin of the age bias plot. See details.

    + +
    hist.panel.size

    A numeric between 0 and 1 that indicates the proportional size of histograms (relative to the entire plotting pane) in the plot margins (only used if xHist=TRUE or yHist=TRUE).

    + +
    col.hist

    A string for the color of the bars in the marginal histograms (only used if xHist=TRUE or yHist=TRUE).

    + +
    allowAdd

    A logical that will allow the user to add items to the main (i.e., not the marginal histograms) plot panel (if TRUE). Defaults to FALSE.

    +
    -
    -

    Value

    -

    ageBias returns a list with the following items:

    • data A data.frame with the original paired age estimates and the difference between those estimates.

    • +
      +

      Value

      + + +

      ageBias returns a list with the following items:

      • data A data.frame with the original paired age estimates and the difference between those estimates.

      • agree The age-agreement table.

      • bias A data.frame that contains the bias statistics.

      • bias.diff A data.frame that contains the bias statistics for the differences.

      • @@ -225,8 +317,8 @@

        Value

      • nref.lab A string that contains an optional label for the age estimates in the rows (non-reference) of the age-agreement table.

      summary returns the result if what= contains one item, otherwise it returns nothing. Nothing is returned by plot or plotAB, but see details for a description of the plot that is produced.

      -
      -

      Details

      +
      +

      Details

      Generally, one of the two age estimates will be identified as the “reference” set. In some cases this may be the true ages, the ages from the more experienced reader, the ages from the first reading, or the ages from the structure generally thought to provide the most accurate results. In other cases, such as when comparing two novice readers, the choice may be arbitrary. The reference ages will form the columns of the age-agreement table and will be the “constant” age used in the t-tests and age bias plots (i.e., the x-axis). See further details below.

      The age-agreement table is constructed with what="table" in summary. The agreement table can be “flipped” (i.e., the rows in descending rather than ascending order) with flip.table=TRUE. By default, the tables are shown with zeros replaced by dashes. This behavior can be changed with zero.print.

      Three statistical tests of symmetry for the age-agreement table can be computed with what= in summary. The “unpooled” or Bowker test as described in Hoenig et al. (1995) is constructed with what="Bowker", the “semi-pooled” or Evans-Hoenig test as described in Evans and Hoenig (1998) is constructed with what="EvansHoenig", and the “pooled” or McNemar test as described in Evans and Hoenig (1998) is constructed with what="McNemar". All three tests are computed with what="symmetry".

      @@ -236,37 +328,37 @@

      Details

      Individual t-tests to determine if the mean age of the nonreference set at a particular age of the reference set is equal to the reference age (e.g., is the mean age of the nonreference set at age-3 of the reference set statistically different from 3?) are shown with what="bias" in summary. The results provide a column that indicates whether the difference is significant or not as determined by adjusted p-values from the t-tests and using the significance level provided in sig.level (defaults to 0.05). Similar results for the difference in ages (e.g., is the mean reference age minus nonreference age at a nonreference age of 3 different from 0?) are constructed with what="diff.bias" in summary.

      The sample size present in the age-agreement table is found with what="n".

      -
      -

      Testing

      -

      Tested all symmetry test results against results in Evans and Hoenig (2008), the McNemar and Evans-Hoenig results against results from compare2 in fishmethods, and all results using the AlewifeLH data set from FSAdata against results from the online resource at http://www.nefsc.noaa.gov/fbp/age-prec/.

      +
      +

      Testing

      +

      Tested all symmetry test results against results in Evans and Hoenig (2008), the McNemar and Evans-Hoenig results against results from compare2 in fishmethods, and all results using the AlewifeLH data set from FSAdata against results from the online resource at http://www.nefsc.noaa.gov/fbp/age-prec/.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      4-Age Comparisons. Note that plot has changed since IFAR was published. Some of the original functionality is in plotAB.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Campana, S.E., M.C. Annand, and J.I. McMillan. 1995. Graphical and statistical methods for determining the consistency of age determinations. Transactions of the American Fisheries Society 124:131-138. [Was (is?) available from http://www.bio.gc.ca/otoliths/documents/Campana%20et%20al%201995%20TAFS.pdf.]

      Evans, G.T. and J.M. Hoenig. 1998. Testing and viewing symmetry in contingency tables, with application to readers of fish ages. Biometrics 54:620-629.

      Hoenig, J.M., M.J. Morgan, and C.A. Brown. 1995. Analysing differences between two age determination methods by tests of symmetry. Canadian Journal of Fisheries and Aquatic Sciences 52:364-368.

      McBride, R.S. 2015. Diagnosis of paired age agreement: A simulation approach of accuracy and precision effects. ICES Journal of Marine Science 72:2149-2167.

      Muir, A.M., M.P. Ebener, J.X. He, and J.E. Johnson. 2008. A comparison of the scale and otolith methods of age estimation for lake whitefish in Lake Huron. North American Journal of Fisheries Management 28:625-635. [Was (is?) available from http://www.tandfonline.com/doi/abs/10.1577/M06-160.1]

      -
      -

      See also

      +
      +

      See also

      See agePrecision for measures of precision between pairs of age estimates. See compare2 in fishmethods for similar functionality. See plotAB for a more traditional age-bias plot.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ab1 <- ageBias(scaleC~otolithC,data=WhitefishLC,
      -               ref.lab="Otolith Age",nref.lab="Scale Age")
      -summary(ab1)
      +    
      +

      Examples

      +
      ab1 <- ageBias(scaleC~otolithC,data=WhitefishLC,
      +               ref.lab="Otolith Age",nref.lab="Scale Age")
      +summary(ab1)
       #> Sample size in the age agreement table is 151.
       #> 
       #> Summary of Scale Age by Otolith Age 
      @@ -347,24 +439,24 @@ 

      Examples

      #> 1 McNemar 1 51.57851 6.879037e-13 #> 2 EvansHoenig 10 62.46849 1.232555e-09 #> 3 Bowker 54 75.97662 2.598188e-02 -summary(ab1,what="symmetry") +summary(ab1,what="symmetry") #> symTest df chi.sq p #> 1 McNemar 1 51.57851 6.879037e-13 #> 2 EvansHoenig 10 62.46849 1.232555e-09 #> 3 Bowker 54 75.97662 2.598188e-02 -summary(ab1,what="Bowker") +summary(ab1,what="Bowker") #> symTest df chi.sq p #> 3 Bowker 54 75.97662 0.02598188 -summary(ab1,what="EvansHoenig") +summary(ab1,what="EvansHoenig") #> symTest df chi.sq p #> 2 EvansHoenig 10 62.46849 1.232555e-09 -summary(ab1,what="McNemar") +summary(ab1,what="McNemar") #> symTest df chi.sq p #> 1 McNemar 1 51.57851 6.879037e-13 -summary(ab1,what="McNemar",cont.corr="Yates") +summary(ab1,what="McNemar",cont.corr="Yates") #> symTest df chi.sq p #> 1 McNemar (Yates Correction) 1 50.92769 9.583228e-13 -summary(ab1,what="bias") +summary(ab1,what="bias") #> otolithC n min max mean SE t adj.p sig LCI UCI #> 1 9 1 2 1.44 0.176 2.530 0.28212 FALSE 1.039 1.85 #> 2 7 1 5 2.00 0.577 0.000 1.00000 FALSE 0.587 3.41 @@ -386,7 +478,7 @@

      Examples

      #> 18 2 7 13 10.00 NA NA NA FALSE NA NA #> 19 1 14 14 14.00 NA NA NA FALSE NA NA #> 23 1 10 10 10.00 NA NA NA FALSE NA NA -summary(ab1,what="diff.bias") +summary(ab1,what="diff.bias") #> otolithC n min max mean SE t adj.p sig LCI UCI #> 1 9 0 1 0.444 0.176 2.530 0.28212 FALSE 0.0393 0.850 #> 2 7 -1 3 0.000 0.577 0.000 1.00000 FALSE -1.4127 1.413 @@ -408,9 +500,9 @@

      Examples

      #> 18 2 -11 -5 -8.000 NA NA NA FALSE NA NA #> 19 1 -5 -5 -5.000 NA NA NA FALSE NA NA #> 23 1 -13 -13 -13.000 NA NA NA FALSE NA NA -summary(ab1,what="n") +summary(ab1,what="n") #> Sample size in the age agreement table is 151. -summary(ab1,what=c("n","symmetry","table")) +summary(ab1,what=c("n","symmetry","table")) #> Sample size in the age agreement table is 151. #> #> Raw agreement table (square) @@ -445,8 +537,8 @@

      Examples

      #> 1 McNemar 1 51.57851 6.879037e-13 #> 2 EvansHoenig 10 62.46849 1.232555e-09 #> 3 Bowker 54 75.97662 2.598188e-02 -# flip table (easy to compare to age bias plot) and show zeroes (not dashes) -summary(ab1,what="table",flip.table=TRUE,zero.print="0") +# flip table (easy to compare to age bias plot) and show zeroes (not dashes) +summary(ab1,what="table",flip.table=TRUE,zero.print="0") #> Otolith Age #> Scale Age 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 #> 23 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 @@ -472,67 +564,67 @@

      Examples

      #> 3 0 1 10 6 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #> 2 4 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #> 1 5 4 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - - -############################################################# -## Differences Plot (inspired by Muir et al. (2008)) -# Default (ranges, open circles for sig diffs, marginal hists) -plot(ab1) + + +############################################################# +## Differences Plot (inspired by Muir et al. (2008)) +# Default (ranges, open circles for sig diffs, marginal hists) +plot(ab1) -# Show CIs for means (with and without ranges) -plot(ab1,show.CI=TRUE) +# Show CIs for means (with and without ranges) +plot(ab1,show.CI=TRUE) -plot(ab1,show.CI=TRUE,show.range=FALSE) +plot(ab1,show.CI=TRUE,show.range=FALSE) -# show points (with and without CIs) -plot(ab1,show.CI=TRUE,show.range=FALSE,show.pts=TRUE) +# show points (with and without CIs) +plot(ab1,show.CI=TRUE,show.range=FALSE,show.pts=TRUE) -plot(ab1,show.range=FALSE,show.pts=TRUE) +plot(ab1,show.range=FALSE,show.pts=TRUE) -# Use same symbols for all means (with ranges) -plot(ab1,pch.mean.sig=19) +# Use same symbols for all means (with ranges) +plot(ab1,pch.mean.sig=19) -# Use same symbols/colors for all means/CIs (without ranges) -plot(ab1,show.range=FALSE,show.CI=TRUE,pch.mean.sig=19,col.CIsig="black") +# Use same symbols/colors for all means/CIs (without ranges) +plot(ab1,show.range=FALSE,show.CI=TRUE,pch.mean.sig=19,col.CIsig="black") -# Remove histograms -plot(ab1,xHist=FALSE) +# Remove histograms +plot(ab1,xHist=FALSE) -plot(ab1,yHist=FALSE) +plot(ab1,yHist=FALSE) -plot(ab1,xHist=FALSE,yHist=FALSE) +plot(ab1,xHist=FALSE,yHist=FALSE) -## Suppress confidence intervals for n < a certain value -## must set this in the original ageBias() call -ab2 <- ageBias(scaleC~otolithC,data=WhitefishLC,min.n.CI=8, - ref.lab="Otolith Age",nref.lab="Scale Age") -plot(ab2,show.CI=TRUE,show.range=FALSE) +## Suppress confidence intervals for n < a certain value +## must 
set this in the original ageBias() call +ab2 <- ageBias(scaleC~otolithC,data=WhitefishLC,min.n.CI=8, + ref.lab="Otolith Age",nref.lab="Scale Age") +plot(ab2,show.CI=TRUE,show.range=FALSE) - - -############################################################# -## Differences Plot ( inspired by Bland-Altman plots in McBride (2015)) -plot(ab1,xvals="mean") + + +############################################################# +## Differences Plot ( inspired by Bland-Altman plots in McBride (2015)) +plot(ab1,xvals="mean") -## Modify axis limits -plot(ab1,xvals="mean",xlim=c(1,17)) +## Modify axis limits +plot(ab1,xvals="mean",xlim=c(1,17)) -## Add and remove histograms -plot(ab1,xvals="mean",xHist=TRUE) +## Add and remove histograms +plot(ab1,xvals="mean",xHist=TRUE) -plot(ab1,xvals="mean",xHist=TRUE,yHist=FALSE) +plot(ab1,xvals="mean",xHist=TRUE,yHist=FALSE) -plot(ab1,xvals="mean",yHist=FALSE) +plot(ab1,xvals="mean",yHist=FALSE) - -############################################################# -## Adding post hoc analyses to the main plot -# get original graphing parameters to be reset at the end -op <- par(no.readonly=TRUE) - -# get raw data -tmp <- ab1$d -head(tmp) + +############################################################# +## Adding post hoc analyses to the main plot +# get original graphing parameters to be reset at the end +op <- par(no.readonly=TRUE) + +# get raw data +tmp <- ab1$d +head(tmp) #> scaleC otolithC diff mean #> 1 3 3 0 3.0 #> 2 4 3 1 3.5 @@ -540,31 +632,31 @@

      Examples

      #> 4 4 3 1 3.5 #> 5 3 3 0 3.0 #> 6 4 6 -2 5.0 - -# Add mean difference (w/ approx. 95% CI) -bias <- mean(tmp$diff)+c(-1.96,0,1.96)*se(tmp$diff) -plot(ab1,xvals="mean",xlim=c(1,17),allowAdd=TRUE) -abline(h=bias,lty=2,col="red") + +# Add mean difference (w/ approx. 95% CI) +bias <- mean(tmp$diff)+c(-1.96,0,1.96)*se(tmp$diff) +plot(ab1,xvals="mean",xlim=c(1,17),allowAdd=TRUE) +abline(h=bias,lty=2,col="red") -par(op) - -# Same as above, but without marginal histogram, and with -# 95% agreement lines as well (1.96SDs) -# (this is nearly a replicate of a Bland-Altman plot) -bias <- mean(tmp$diff)+c(-1.96,0,1.96)*se(tmp$diff) -agline <- mean(tmp$diff)+c(-1.96,1.96)*sd(tmp$diff) -plot(ab1,xvals="mean",yHist=FALSE,allowAdd=TRUE) -abline(h=bias,lty=2,col="red") -abline(h=agline,lty=3,lwd=2,col="blue") +par(op) + +# Same as above, but without marginal histogram, and with +# 95% agreement lines as well (1.96SDs) +# (this is nearly a replicate of a Bland-Altman plot) +bias <- mean(tmp$diff)+c(-1.96,0,1.96)*se(tmp$diff) +agline <- mean(tmp$diff)+c(-1.96,1.96)*sd(tmp$diff) +plot(ab1,xvals="mean",yHist=FALSE,allowAdd=TRUE) +abline(h=bias,lty=2,col="red") +abline(h=agline,lty=3,lwd=2,col="blue") -par(op) - -# Add linear regression line of differences on means (w/ approx. 95% CI) -lm1 <- lm(diff~mean,data=tmp) -xval <- seq(0,19,0.1) -pred1 <- predict(lm1,data.frame(mean=xval),interval="confidence") -bias1 <- data.frame(xval,pred1) -head(bias1) +par(op) + +# Add linear regression line of differences on means (w/ approx. 95% CI) +lm1 <- lm(diff~mean,data=tmp) +xval <- seq(0,19,0.1) +pred1 <- predict(lm1,data.frame(mean=xval),interval="confidence") +bias1 <- data.frame(xval,pred1) +head(bias1) #> xval fit lwr upr #> 1 0.0 1.261964 0.6232456 1.900682 #> 2 0.1 1.217797 0.5861659 1.849428 @@ -572,21 +664,21 @@

      Examples

      #> 4 0.3 1.129463 0.5119307 1.746995 #> 5 0.4 1.085296 0.4747735 1.695819 #> 6 0.5 1.041129 0.4375887 1.644670 -plot(ab1,xvals="mean",xlim=c(1,17),allowAdd=TRUE) -lines(lwr~xval,data=bias1,lty=2,col="red") -lines(upr~xval,data=bias1,lty=2,col="red") -lines(fit~xval,data=bias1,lty=2,col="red") +plot(ab1,xvals="mean",xlim=c(1,17),allowAdd=TRUE) +lines(lwr~xval,data=bias1,lty=2,col="red") +lines(upr~xval,data=bias1,lty=2,col="red") +lines(fit~xval,data=bias1,lty=2,col="red") -par(op) - -# Add loess of differences on means (w/ approx. 95% CI as a polygon) -lo2 <- loess(diff~mean,data=tmp) -xval <- seq(min(tmp$mean),max(tmp$mean),0.1) -pred2 <- predict(lo2,data.frame(mean=xval),se=TRUE) -bias2 <- data.frame(xval,pred2) -bias2$lwr <- bias2$fit-1.96*bias2$se.fit -bias2$upr <- bias2$fit+1.96*bias2$se.fit -head(bias2) +par(op) + +# Add loess of differences on means (w/ approx. 95% CI as a polygon) +lo2 <- loess(diff~mean,data=tmp) +xval <- seq(min(tmp$mean),max(tmp$mean),0.1) +pred2 <- predict(lo2,data.frame(mean=xval),se=TRUE) +bias2 <- data.frame(xval,pred2) +bias2$lwr <- bias2$fit-1.96*bias2$se.fit +bias2$upr <- bias2$fit+1.96*bias2$se.fit +head(bias2) #> xval fit se.fit residual.scale df lwr upr #> 1 1.0 -0.16024844 0.5716894 1.887342 145.685 -1.2807597 0.9602628 #> 2 1.1 -0.13313237 0.5396064 1.887342 145.685 -1.1907610 0.9244962 @@ -594,59 +686,55 @@

      Examples

      #> 4 1.3 -0.08401421 0.4802391 1.887342 145.685 -1.0252828 0.8572543 #> 5 1.4 -0.06202196 0.4530549 1.887342 145.685 -0.9500096 0.8259657 #> 6 1.5 -0.04174751 0.4276065 1.887342 145.685 -0.8798563 0.7963613 -plot(ab1,xvals="mean",xlim=c(1,17),allowAdd=TRUE) -with(bias2,polygon(c(xval,rev(xval)),c(lwr,rev(upr)), - col=col2rgbt("red",1/10),border=NA)) -lines(fit~xval,data=bias2,lty=2,col="red") +plot(ab1,xvals="mean",xlim=c(1,17),allowAdd=TRUE) +with(bias2,polygon(c(xval,rev(xval)),c(lwr,rev(upr)), + col=col2rgbt("red",1/10),border=NA)) +lines(fit~xval,data=bias2,lty=2,col="red") -par(op) - -# Same as above, but polygon and line behind the points -plot(ab1,xvals="mean",xlim=c(1,17),col.pts="white",allowAdd=TRUE) -with(bias2,polygon(c(xval,rev(xval)),c(lwr,rev(upr)), - col=col2rgbt("red",1/10),border=NA)) -lines(fit~xval,data=bias2,lty=2,col="red") -points(diff~mean,data=tmp,pch=19,col=col2rgbt("black",1/8)) +par(op) + +# Same as above, but polygon and line behind the points +plot(ab1,xvals="mean",xlim=c(1,17),col.pts="white",allowAdd=TRUE) +with(bias2,polygon(c(xval,rev(xval)),c(lwr,rev(upr)), + col=col2rgbt("red",1/10),border=NA)) +lines(fit~xval,data=bias2,lty=2,col="red") +points(diff~mean,data=tmp,pch=19,col=col2rgbt("black",1/8)) -par(op) - -# Can also be made with the reference ages on the x-axis -lo3 <- loess(diff~otolithC,data=tmp) -xval <- seq(min(tmp$otolithC),max(tmp$otolithC),0.1) -pred3 <- predict(lo3,data.frame(otolithC=xval),se=TRUE) -bias3 <- data.frame(xval,pred3) -bias3$lwr <- bias3$fit-1.96*bias3$se.fit -bias3$upr <- bias3$fit+1.96*bias3$se.fit -plot(ab1,show.range=FALSE,show.pts=TRUE,col.pts="white",allowAdd=TRUE) -with(bias3,polygon(c(xval,rev(xval)),c(lwr,rev(upr)), - col=col2rgbt("red",1/10),border=NA)) -lines(fit~xval,data=bias3,lty=2,col="red") -points(diff~otolithC,data=tmp,pch=19,col=col2rgbt("black",1/8)) +par(op) + +# Can also be made with the reference ages on the x-axis +lo3 <- loess(diff~otolithC,data=tmp) +xval <- 
seq(min(tmp$otolithC),max(tmp$otolithC),0.1) +pred3 <- predict(lo3,data.frame(otolithC=xval),se=TRUE) +bias3 <- data.frame(xval,pred3) +bias3$lwr <- bias3$fit-1.96*bias3$se.fit +bias3$upr <- bias3$fit+1.96*bias3$se.fit +plot(ab1,show.range=FALSE,show.pts=TRUE,col.pts="white",allowAdd=TRUE) +with(bias3,polygon(c(xval,rev(xval)),c(lwr,rev(upr)), + col=col2rgbt("red",1/10),border=NA)) +lines(fit~xval,data=bias3,lty=2,col="red") +points(diff~otolithC,data=tmp,pch=19,col=col2rgbt("black",1/8)) -par(op) - +par(op) +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/agePrecision.html b/docs/reference/agePrecision.html index 98870f3a..ec55d9f0 100644 --- a/docs/reference/agePrecision.html +++ b/docs/reference/agePrecision.html @@ -1,102 +1,122 @@ -Compute measures of precision among sets of ages. — agePrecision • FSACompute measures of precision among sets of ages. — agePrecision • FSA + + Skip to contents -
    -
    -
    - +
    +
    +
    -
    -

    Computes overall measures of precision for multiple age estimates made on the same individuals. Ages may be from two or more readers of the same structure, one reader at two or more times, or two or more structures (e.g., scales, spines, otoliths). Measures of precision include ACV (Average Coefficient of Variation), APE (Average Percent Error), AAD (Average Absolute Deviation), and ASD (Average Standard Devation), and various percentage difference values.

    +
    +

    Computes overall measures of precision for multiple age estimates made on the same individuals. Ages may be from two or more readers of the same structure, one reader at two or more times, or two or more structures (e.g., scales, spines, otoliths). Measures of precision include ACV (Average Coefficient of Variation), APE (Average Percent Error), AAD (Average Absolute Deviation), and ASD (Average Standard Deviation), and various percentage difference values.

    -
    -
    agePrecision(formula, data)
    -
    -# S3 method for agePrec
    -summary(
    -  object,
    -  what = c("precision", "difference", "absolute difference", "details"),
    -  percent = TRUE,
    -  trunc.diff = NULL,
    -  digits = 4,
    -  show.prec2 = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    agePrecision(formula, data)
    +
    +# S3 method for agePrec
    +summary(
    +  object,
    +  what = c("precision", "difference", "absolute difference", "details"),
    +  percent = TRUE,
    +  trunc.diff = NULL,
    +  digits = 4,
    +  show.prec2 = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula of the form ~var1+var2+var3+... or, alternatively, var1~var2+var3+..., where the varX generically represent the variables that contain the age estimates. The alternative formula allows for similar code as used in ageBias and can have only one variable on the left-hand side.

    + +
    data

    A data.frame that minimally contains the variables in formula.

    + +
    object

    An object of class agePrec, usually from agePrecision.

    + +
    what

    A string (or vector of strings) that indicates what type of summary to print. See details.

    + +
    percent

    A logical that indicates whether the difference table (see details) should be represented as percentages (TRUE; default) or frequency (FALSE) of fish.

    + +
    trunc.diff

    A single integer that identifies the age for which all values that age and greater are combined into one category. See the examples.

    + +
    digits

    A single numeric that indicates the minimum number of digits to print when using summary.

    + +
    show.prec2

    A logical that indicates whether the precision metrics that use the median (i.e., ACV2 and APE2) should be shown when only two age estimates were made (in this instance they will be exactly equal to ACV and APE). Default is to not show these values in this situation.

    + +
    ...

    Additional arguments for methods.

    +
    -
    -

    Value

    -

    The main function returns a list with the following items:

    • detail A data.frame with all data given in data and intermediate calculations for each fish. See details

    • +
      +

      Value

      + + +

      The main function returns a list with the following items:

      • detail A data.frame with all data given in data and intermediate calculations for each fish. See details

      • rawdiff A frequency table of fish by differences for each pair of ages.

      • absdiff A frequency table of fish by absolute differences for each pair of ages.

      • AAD The average absolute deviation.

      • @@ -111,17 +131,17 @@

        Value

      • validn Number of fish in data that have non-NA data for all R age estimates.

      The summary returns the result if what= contains only one item, otherwise it returns nothing. See details for what is printed.

      -
      -

      Details

      +
      +

      Details

      If what="precision" in summary then a summary table that contains the following items will be printed:

      • n Number of fish in data.

      • validn Number of fish in data that have non-NA data for all R age estimates.

      • R Number of age estimates given in formula.

      • PercAgree The percentage of fish for which all age estimates perfectly agree.

      • ASD The average (across all fish) standard deviation of ages within a fish.

      • -
      • ACV The average (across all fish) coefficient of variation of ages within a fish using the mean as the divisor. See the IFAR chapter for calculation details.

      • +
      • ACV The average (across all fish) coefficient of variation of ages within a fish using the mean as the divisor. See the IFAR chapter for calculation details.

      • ACV2 The average (across all fish) coefficient of variation of ages within a fish using the median as the divisor. This will only be shown if R>2 or show.prec2=TRUE.

      • AAD The average (across all fish) absolute deviation of ages within a fish.

      • -
      • APE The average (across all fish) percent error of ages within a fish using the mean as the divisor. See the IFAR chapter for calculation details.

      • +
      • APE The average (across all fish) percent error of ages within a fish using the mean as the divisor. See the IFAR chapter for calculation details.

      • APE2 The average (across all fish) percent error of ages within a fish using the median as the divisor. This will only be shown if R>2 or show.prec2=TRUE.

      • AD The average (across all fish) index of precision (D).

      Note that ACV2 and APE2 will not be printed when what="precision" if only two sets of ages are given (because mean=median such that ACV=ACV2 and APE=APE2). @@ -130,37 +150,37 @@

      Details

      If what="detail" is used in summary, then a data.frame of the original data along with the intermediate calculations of the mean age, median age, modal age (will be NA if a mode does not exist (e.g., all different ages) or multiple modes exist), standard deviation of age (SD), coefficient of variation using the mean as the divisor (CV), coefficient of variation using the median as the divisor (CV2), absolute deviation using the mean as the divisor (AD), absolute deviation using the median as the divisor (AD2), average percent error (PE), and index of precision (D) for each individual will be printed.

      All percentage calculations above use the validn value in the denominator.

      -
      -

      Testing

      -

      Tested all precision results against published results in Herbst and Marsden (2011) for the WhitefishLC data and the results for the AlewifeLH data set from FSAdata against results from the online resource at http://www.nefsc.noaa.gov/fbp/age-prec/.

      +
      +

      Testing

      +

      Tested all precision results against published results in Herbst and Marsden (2011) for the WhitefishLC data and the results for the AlewifeLH data set from FSAdata against results from the online resource at http://www.nefsc.noaa.gov/fbp/age-prec/.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      4-Age Comparisons.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Beamish, R.J. and D.A. Fournier. 1981. A method for comparing the precision of a set of age determinations. Canadian Journal of Fisheries and Aquatic Sciences 38:982-983. [Was (is?) available from http://www.pac.dfo-mpo.gc.ca/science/people-gens/beamish/PDF_files/compareagecjfas1981.pdf.]

      Campana, S.E. 1982. Accuracy, precision and quality control in age determination, including a review of the use and abuse of age validation methods. Journal of Fish Biology 59:197-242. [Was (is?) available from http://www.denix.osd.mil/nr/crid/Coral_Reef_Iniative_Database/References_for_Reef_Assessment_files/Campana,%202001.pdf.]

      Campana, S.E., M.C. Annand, and J.I. McMillan. 1995. Graphical and statistical methods for determining the consistency of age determinations. Transactions of the American Fisheries Society 124:131-138. [Was (is?) available from http://www.bio.gc.ca/otoliths/documents/Campana%20et%20al%201995%20TAFS.pdf.]

      Chang, W.Y.B. 1982. A statistical method for evaluating the reproducibility of age determination. Canadian Journal of Fisheries and Aquatic Sciences 39:1208-1210. [Was (is?) available from http://www.nrcresearchpress.com/doi/abs/10.1139/f82-158.]

      McBride, R.S. 2015. Diagnosis of paired age agreement: A simulation approach of accuracy and precision effects. ICES Journal of Marine Science, 72:2149-2167.

      -
      -

      See also

      +
      +

      See also

      See ageBias for computation of the full age agreement table, along with tests and plots of age bias.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## Example with just two age estimates
      -ap1 <- agePrecision(~otolithC+scaleC,data=WhitefishLC)
      -summary(ap1)
      +    
      +

      Examples

      +
      ## Example with just two age estimates
      +ap1 <- agePrecision(~otolithC+scaleC,data=WhitefishLC)
      +summary(ap1)
       #> Precision summary statistics
       #>    n validn R PercAgree   ASD   ACV   AAD   APE
       #>  151    151 2     19.87 1.541 21.11 1.089 14.93
      @@ -484,29 +504,29 @@ 

      Examples

      #> 149 3.226 #> 150 25.000 #> 151 36.842 -summary(ap1,what="precision") +summary(ap1,what="precision") #> n validn R PercAgree ASD ACV AAD APE #> 151 151 2 19.87 1.541 21.11 1.089 14.93 -summary(ap1,what="difference") +summary(ap1,what="difference") #> -3 -2 -1 0 1 2 3 4 5 6 #> 2.6490 0.6623 10.5960 19.8675 19.8675 15.8940 11.2583 5.9603 3.3113 5.2980 #> 7 8 9 10 11 12 13 #> 1.9868 1.3245 0.0000 0.0000 0.6623 0.0000 0.6623 -summary(ap1,what="difference",percent=FALSE) +summary(ap1,what="difference",percent=FALSE) #> -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 #> 4 1 16 30 30 24 17 9 5 8 3 2 0 0 1 0 1 -summary(ap1,what="absolute") +summary(ap1,what="absolute") #> 0 1 2 3 4 5 6 7 8 9 #> 19.8675 30.4636 16.5563 13.9073 5.9603 3.3113 5.2980 1.9868 1.3245 0.0000 #> 10 11 12 13 #> 0.0000 0.6623 0.0000 0.6623 -summary(ap1,what="absolute",percent=FALSE) +summary(ap1,what="absolute",percent=FALSE) #> 0 1 2 3 4 5 6 7 8 9 10 11 12 13 #> 30 46 25 21 9 5 8 3 2 0 0 1 0 1 -summary(ap1,what="absolute",trunc.diff=4) +summary(ap1,what="absolute",trunc.diff=4) #> 0 1 2 3 4+ #> 19.87 30.46 16.56 13.91 19.21 -summary(ap1,what=c("precision","difference")) +summary(ap1,what=c("precision","difference")) #> Precision summary statistics #> n validn R PercAgree ASD ACV AAD APE #> 151 151 2 19.87 1.541 21.11 1.089 14.93 @@ -517,7 +537,7 @@

      Examples

      #> 2.6490 0.6623 10.5960 19.8675 19.8675 15.8940 11.2583 5.9603 3.3113 5.2980 #> 7 8 9 10 11 12 13 #> 1.9868 1.3245 0.0000 0.0000 0.6623 0.0000 0.6623 -summary(ap1,what="detail") +summary(ap1,what="detail") #> otolithC scaleC mean median mode SD CV CV2 AD PE PE2 #> 1 3 3 3.0 3.0 3 0.0000 0.000 0.000 0.0 0.000 0.000 #> 2 3 4 3.5 3.5 NA 0.7071 20.203 20.203 0.5 14.286 14.286 @@ -822,25 +842,25 @@

      Examples

      #> 149 3.226 #> 150 25.000 #> 151 36.842 - -barplot(ap1$rawdiff,ylab="Frequency",xlab="Otolith - Scale Age") + +barplot(ap1$rawdiff,ylab="Frequency",xlab="Otolith - Scale Age") -plot(AD~mean,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Mean Age",ylab="Absolute Deviation Age") +plot(AD~mean,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Mean Age",ylab="Absolute Deviation Age") -plot(SD~mean,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Mean Age",ylab="Standard deviation Age") +plot(SD~mean,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Mean Age",ylab="Standard deviation Age") -plot(SD~AD,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Absolute Deviation Age",ylab="Standard deviation Age") +plot(SD~AD,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Absolute Deviation Age",ylab="Standard deviation Age") -plot(CV~PE,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Percent Error Age",ylab="Coefficient of Variation Age") +plot(CV~PE,data=ap1$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Percent Error Age",ylab="Coefficient of Variation Age") - -## Example with three age estimates -ap2 <- agePrecision(~otolithC+finrayC+scaleC,data=WhitefishLC) -summary(ap2,digits=3) + +## Example with three age estimates +ap2 <- agePrecision(~otolithC+finrayC+scaleC,data=WhitefishLC) +summary(ap2,digits=3) #> Precision summary statistics #> n validn R PercAgree ASD ACV ACV2 AAD APE APE2 AD #> 151 151 3 12.6 1.49 21.8 27.5 1.1 16.2 14.8 12.6 @@ -1176,10 +1196,10 @@

      Examples

      #> 149 4.44 3.85 #> 150 20.00 16.38 #> 151 29.17 23.13 -summary(ap2,what="precision") +summary(ap2,what="precision") #> n validn R PercAgree ASD ACV ACV2 AAD APE APE2 AD #> 151 151 3 12.58 1.487 21.77 27.52 1.104 16.19 14.76 12.57 -summary(ap2,what="difference") +summary(ap2,what="difference") #> -4 -3 -2 -1 0 1 2 #> otolithC - finrayC 0.0000 0.0000 1.3245 3.3113 24.5033 17.8808 16.5563 #> otolithC - scaleC 0.0000 2.6490 0.6623 10.5960 19.8675 19.8675 15.8940 @@ -1192,12 +1212,12 @@

      Examples

      #> otolithC - finrayC 0.0000 0.6623 0.0000 0.0000 0.6623 #> otolithC - scaleC 0.0000 0.6623 0.0000 0.6623 0.0000 #> finrayC - scaleC 0.0000 0.0000 0.0000 0.0000 0.0000 -summary(ap2,what="absolute",percent=FALSE,trunc.diff=4) +summary(ap2,what="absolute",percent=FALSE,trunc.diff=4) #> 0 1 2 3 4+ #> otolithC v. finrayC 37 32 27 18 37 #> otolithC v. scaleC 30 46 25 21 29 #> finrayC v. scaleC 61 52 23 8 7 -summary(ap2,what="detail",digits=3) +summary(ap2,what="detail",digits=3) #> otolithC finrayC scaleC mean median mode SD CV CV2 AD PE #> 1 3 3 3 3.00 3 3 0.000 0.00 0.00 0.000 0.00 #> 2 3 3 4 3.33 3 3 0.577 17.32 23.57 0.444 13.33 @@ -1502,45 +1522,41 @@

      Examples

      #> 149 4.44 3.85 #> 150 20.00 16.38 #> 151 29.17 23.13 - -plot(AD~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Mean Age",ylab="Absolute Deviation Age") + +plot(AD~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Mean Age",ylab="Absolute Deviation Age") -plot(SD~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Mean Age",ylab="Standard Deviation Age") +plot(SD~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Mean Age",ylab="Standard Deviation Age") -plot(SD~AD,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Absolute Deviation Age",ylab="Standard Deviation Age") +plot(SD~AD,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Absolute Deviation Age",ylab="Standard Deviation Age") -plot(CV~PE,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Percent Error Age",ylab="Coefficient of Variation Age") +plot(CV~PE,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Percent Error Age",ylab="Coefficient of Variation Age") -plot(median~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), - xlab="Mean Age",ylab="Median Age") +plot(median~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), + xlab="Mean Age",ylab="Median Age") - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/alkAgeDist.html b/docs/reference/alkAgeDist.html index a648fdd3..96fe9f88 100644 --- a/docs/reference/alkAgeDist.html +++ b/docs/reference/alkAgeDist.html @@ -1,126 +1,134 @@ -Proportions-at-age from an age-length key — alkAgeDist • FSAProportions-at-age from an age-length key — alkAgeDist • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the proportions-at-age (with standard errors) in a larger sample based on an age-length-key created from a subsample of ages through a two-stage random sampling design. Follows the methods in Quinn and Deriso (1999).

    -
    -
    alkAgeDist(key, lenA.n, len.n)
    +
    +

    Usage

    +
    alkAgeDist(key, lenA.n, len.n)
    -
    -

    Arguments

    +
    +

    Arguments

    key

    A numeric matrix that contains the age-length key. See details.

    + +
    lenA.n

    A numeric vector of sample sizes for each length interval in the aged sample.

    + +
    len.n

    A numeric vector of sample sizes for each length interval in the complete sample (i.e., all fish regardless of whether they were aged or not).

    +
    -
    -

    Value

    -

    A data.frame with as many rows as ages (columns) present in key and the following three variables:

    • age The ages.

    • +
      +

      Value

      + + +

      A data.frame with as many rows as ages (columns) present in key and the following three variables:

      • age The ages.

      • prop The proportion of fish at each age.

      • se The SE for the proportion of fish at each age.

      -
      -

      Details

      +
      +

      Details

      The age-length key in key must have length intervals as rows and ages as columns. The row names of key (i.e., rownames(key)) must contain the minimum values of each length interval (e.g., if an interval is 100-109 then the corresponding row name must be 100). The column names of key (i.e., colnames(key)) must contain the age values (e.g., the columns can NOT be named with “age.1”, for example).

      The length intervals in the rows of key must contain all of the length intervals present in the larger sample. Thus, the length of len.n must, at least, equal the number of rows in key. If this constraint is not met, then the function will stop with an error message.

      The values in lenA.n are equal to what the row sums of key would have been before key was converted to a row proportions table. Thus, the length of lenA.n must also be equal to the number of rows in key. If this constraint is not met, then the function will stop with an error message.

      -
      -

      Testing

      -

      The results from this function perfectly match the results in Table 8.4 (left) of Quinn and Deriso (1999) using SnapperHG2 from FSAdata. The results also perfectly match the results from using alkprop in fishmethods.

      +
      +

      Testing

      +

      The results from this function perfectly match the results in Table 8.4 (left) of Quinn and Deriso (1999) using SnapperHG2 from FSAdata. The results also perfectly match the results from using alkprop in fishmethods.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      5-Age-Length Key.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Lai, H.-L. 1987. Optimum allocation for estimating age composition using age-length key. Fishery Bulletin, 85:179-185.

      Lai, H.-L. 1993. Optimum sampling design for using the age-length key to estimate age composition of a fish population. Fishery Bulletin, 92:382-388.

      Quinn, T. J. and R. B. Deriso. 1999. Quantitative Fish Dynamics. Oxford University Press, New York, New York. 542 pages.

      -
      -

      See also

      +
      +

      See also

      See alkIndivAge and related functions for a completely different methodology. See alkprop from fishmethods for the exact same methodology but with a different format for the inputs.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## Example -- Even breaks for length categories
      -WR1 <- WR79
      -# add length intervals (width=5)
      -WR1$LCat <- lencat(WR1$len,w=5)
      -# get number of fish in each length interval in the entire sample
      -len.n <- xtabs(~LCat,data=WR1)
      -# isolate aged sample and get number in each length interval
      -WR1.age <- subset(WR1, !is.na(age))
      -lenA.n <- xtabs(~LCat,data=WR1.age)
      -# create age-length key
      -raw <- xtabs(~LCat+age,data=WR1.age)
      -( WR1.key <- prop.table(raw, margin=1) )
      +    
      +

      Examples

      +
      ## Example -- Even breaks for length categories
      +WR1 <- WR79
      +# add length intervals (width=5)
      +WR1$LCat <- lencat(WR1$len,w=5)
      +# get number of fish in each length interval in the entire sample
      +len.n <- xtabs(~LCat,data=WR1)
      +# isolate aged sample and get number in each length interval
      +WR1.age <- subset(WR1, !is.na(age))
      +lenA.n <- xtabs(~LCat,data=WR1.age)
      +# create age-length key
      +raw <- xtabs(~LCat+age,data=WR1.age)
      +( WR1.key <- prop.table(raw, margin=1) )
       #>      age
       #> LCat           4          5          6          7          8          9
       #>   35  1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
      @@ -159,9 +167,9 @@ 

      Examples

      #> 105 0.14285714 0.00000000 #> 110 0.20000000 0.20000000 #> 115 1.00000000 0.00000000 - -# use age-length key to estimate age distribution of all fish -alkAgeDist(WR1.key,lenA.n,len.n) + +# use age-length key to estimate age distribution of all fish +alkAgeDist(WR1.key,lenA.n,len.n) #> age prop se #> 1 4 0.416378219 0.013206893 #> 2 5 0.167201351 0.013907259 @@ -171,29 +179,25 @@

      Examples

      #> 6 9 0.032955436 0.011718417 #> 7 10 0.016306565 0.008988814 #> 8 11 0.002532714 0.002574054 - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/alkIndivAge.html b/docs/reference/alkIndivAge.html index 59a1dfda..9c8b7588 100644 --- a/docs/reference/alkIndivAge.html +++ b/docs/reference/alkIndivAge.html @@ -1,133 +1,147 @@ -Use an age-length key to assign age to individuals in the unaged sample. — alkIndivAge • FSAUse an age-length key to assign age to individuals in the unaged sample. — alkIndivAge • FSA + + Skip to contents -
    -
    -
    - +
    +
    +
    -
    +

    Use either the semi- or completely-random methods from Isermann and Knight (2005) to assign ages to individual fish in the unaged sample according to the information in an age-length key supplied by the user.

    -
    -
    alkIndivAge(
    -  key,
    -  formula,
    -  data,
    -  type = c("SR", "CR"),
    -  breaks = NULL,
    -  seed = NULL
    -)
    +
    +

    Usage

    +
    alkIndivAge(
    +  key,
    +  formula,
    +  data,
    +  type = c("SR", "CR"),
    +  breaks = NULL,
    +  seed = NULL
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    key

    A numeric matrix that contains the age-length key. The format of this matrix is important. See details.

    + +
    formula

    A formula of the form age~length where age generically represents the variable that will contain the estimated ages once the key is applied (i.e., should currently contain no values) and length generically represents the variable that contains the known length measurements. If only ~length is used, then a new variable called “age” will be created in the resulting data frame.

    + +
    data

    A data.frame that minimally contains the length measurements and possibly contains a variable that will receive the age assignments as given in formula.

    + +
    type
    -

    A string that indicates whether to use the semi-random (type="SR", default) or completely-random (type="CR") methods for assigning ages to individual fish. See the IFAR chapter for more details.

    +

    A string that indicates whether to use the semi-random (type="SR", default) or completely-random (type="CR") methods for assigning ages to individual fish. See the IFAR chapter for more details.

    + +
    breaks

    A numeric vector of lower values that define the length intervals. See details.

    + +
    seed

    A single numeric that is given to set.seed to set the random seed. This allows repeatability of results.

    +
    -
    -

    Value

    -

    The original data.frame in data with assigned ages added to the column supplied in formula or in an additional column labeled as age. See details.

    +
    +

    Value

    + + +

    The original data.frame in data with assigned ages added to the column supplied in formula or in an additional column labeled as age. See details.

    -
    -

    Details

    +
    +

    Details

    The age-length key in key must have length intervals as rows and ages as columns. The row names of key (i.e., rownames(key)) must contain the minimum values of each length interval (e.g., if an interval is 100-109, then the corresponding row name must be 100). The column names of key (i.e., colnames(key)) must contain the age values (e.g., the columns can NOT be named with “age.1”, for example).

    The length intervals in the rows of key must contain all of the length intervals present in the unaged sample to which the age-length key is to be applied (i.e., sent in the length portion of the formula). If this constraint is not met, then the function will stop with an error message.

    If breaks=NULL, then the length intervals for the unaged sample will be determined with a starting interval at the minimum value of the row names in key and a width of the length intervals as determined by the minimum difference in adjacent row names of key. If length intervals of differing widths were used when constructing key, then those breaks should be supplied to breaks=. Use of breaks= may be useful when “uneven” length interval widths were used because the lengths in the unaged sample are not fully represented in the aged sample. See the examples.

    Assigned ages will be stored in the column identified on the left-hand-side of formula (if the formula has both a left- and right-hand-side). If this variable is missing in formula, then the new column will be labeled with age.

    -
    -

    Testing

    +
    +

    Testing

    The type="SR" method worked perfectly on a small example. The type="SR" method provides results that reasonably approximate the results from alkAgeDist and alkMeanVar, which suggests that the age assessments are reasonable.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    5-Age-Length Key.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Isermann, D.A. and C.T. Knight. 2005. A computer program for age-length keys incorporating age assignment to individual fish. North American Journal of Fisheries Management, 25:1153-1160. [Was (is?) from http://www.tandfonline.com/doi/abs/10.1577/M04-130.1.]

    -
    -

    See also

    +
    +

    See also

    See alkAgeDist and alkMeanVar for alternative methods to derived age distributions and mean (and SD) values for each age. See alkPlot for methods to visualize age-length keys.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com. This is largely an R version of the SAS code provided by Isermann and Knight (2005).

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com. This is largely an R version of the SAS code provided by Isermann and Knight (2005).

    -
    -

    Examples

    -
    ## First Example -- Even breaks for length categories
    -WR1 <- WR79
    -# add length categories (width=5)
    -WR1$LCat <- lencat(WR1$len,w=5)
    -# isolate aged and unaged samples
    -WR1.age <- subset(WR1, !is.na(age))
    -WR1.len <- subset(WR1, is.na(age))
    -# note no ages in unaged sample
    -head(WR1.len)
    +    
    +

    Examples

    +
    ## First Example -- Even breaks for length categories
    +WR1 <- WR79
    +# add length categories (width=5)
    +WR1$LCat <- lencat(WR1$len,w=5)
    +# isolate aged and unaged samples
    +WR1.age <- subset(WR1, !is.na(age))
    +WR1.len <- subset(WR1, is.na(age))
    +# note no ages in unaged sample
    +head(WR1.len)
     #>   ID len age LCat
     #> 1  1  37  NA   35
     #> 2  2  37  NA   35
    @@ -135,9 +149,9 @@ 

    Examples

    #> 4 4 37 NA 35 #> 7 7 42 NA 40 #> 8 8 42 NA 40 -# create age-length key -raw <- xtabs(~LCat+age,data=WR1.age) -( WR1.key <- prop.table(raw, margin=1) ) +# create age-length key +raw <- xtabs(~LCat+age,data=WR1.age) +( WR1.key <- prop.table(raw, margin=1) ) #> age #> LCat 4 5 6 7 8 9 #> 35 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 @@ -176,10 +190,10 @@

    Examples

    #> 105 0.14285714 0.00000000 #> 110 0.20000000 0.20000000 #> 115 1.00000000 0.00000000 -# apply the age-length key -WR1.len <- alkIndivAge(WR1.key,age~len,data=WR1.len) -# now there are ages -head(WR1.len) +# apply the age-length key +WR1.len <- alkIndivAge(WR1.key,age~len,data=WR1.len) +# now there are ages +head(WR1.len) #> ID len age LCat #> 1 1 37 4 35 #> 2 2 37 4 35 @@ -187,44 +201,44 @@

    Examples

    #> 4 4 37 4 35 #> 7 7 42 4 40 #> 8 8 42 4 40 -# combine orig age & new ages -WR1.comb <- rbind(WR1.age, WR1.len) -# mean length-at-age -Summarize(len~age,data=WR1.comb,digits=2) +# combine orig age & new ages +WR1.comb <- rbind(WR1.age, WR1.len) +# mean length-at-age +Summarize(len~age,data=WR1.comb,digits=2) #> age n mean sd min Q1 median Q3 max -#> 1 4 987 51.86 5.14 35 48.00 52.0 56.00 64 -#> 2 5 395 71.76 5.28 60 68.00 72.0 76.00 84 -#> 3 6 270 86.71 4.66 75 83.00 87.0 89.00 99 -#> 4 7 450 97.60 5.18 85 93.00 97.0 101.00 113 -#> 5 8 146 101.19 5.86 90 96.25 101.0 106.75 113 -#> 6 9 77 103.90 3.35 100 102.00 103.0 106.00 114 -#> 7 10 38 104.84 7.08 95 97.00 106.0 109.50 119 -#> 8 11 6 111.33 1.21 110 110.25 111.5 112.00 113 -# age frequency distribution -( af <- xtabs(~age,data=WR1.comb) ) +#> 1 4 987 51.86 5.15 35 48.00 52.0 56.00 64 +#> 2 5 396 71.78 5.32 60 68.00 72.0 76.00 84 +#> 3 6 269 86.77 4.67 75 83.00 87.0 89.00 99 +#> 4 7 450 97.50 5.12 85 93.00 97.0 101.00 113 +#> 5 8 146 101.27 5.90 90 96.25 101.0 107.00 114 +#> 6 9 77 103.86 3.16 100 102.00 103.0 105.00 113 +#> 7 10 38 105.34 7.00 95 98.00 107.0 109.75 119 +#> 8 11 6 112.33 0.82 111 112.00 112.5 113.00 113 +# age frequency distribution +( af <- xtabs(~age,data=WR1.comb) ) #> age #> 4 5 6 7 8 9 10 11 -#> 987 395 270 450 146 77 38 6 -# proportional age distribution -( ap <- prop.table(af) ) +#> 987 396 269 450 146 77 38 6 +# proportional age distribution +( ap <- prop.table(af) ) #> age #> 4 5 6 7 8 9 -#> 0.416631490 0.166737020 0.113972140 0.189953567 0.061629379 0.032503166 +#> 0.416631490 0.167159139 0.113550021 0.189953567 0.061629379 0.032503166 #> 10 11 #> 0.016040523 0.002532714 - -## Second Example -- length sample does not have an age variable -WR2 <- WR79 -# isolate age and unaged samples -WR2.age <- subset(WR2, !is.na(age)) -WR2.len <- subset(WR2, is.na(age)) -# remove age variable (for demo only) -WR2.len <- WR2.len[,-3] -# add length categories to aged sample -WR2.age$LCat <- 
lencat(WR2.age$len,w=5) -# create age-length key -raw <- xtabs(~LCat+age,data=WR2.age) -( WR2.key <- prop.table(raw, margin=1) ) + +## Second Example -- length sample does not have an age variable +WR2 <- WR79 +# isolate age and unaged samples +WR2.age <- subset(WR2, !is.na(age)) +WR2.len <- subset(WR2, is.na(age)) +# remove age variable (for demo only) +WR2.len <- WR2.len[,-3] +# add length categories to aged sample +WR2.age$LCat <- lencat(WR2.age$len,w=5) +# create age-length key +raw <- xtabs(~LCat+age,data=WR2.age) +( WR2.key <- prop.table(raw, margin=1) ) #> age #> LCat 4 5 6 7 8 9 #> 35 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 @@ -263,11 +277,11 @@

    Examples

    #> 105 0.14285714 0.00000000 #> 110 0.20000000 0.20000000 #> 115 1.00000000 0.00000000 -# apply the age-length key -WR2.len <- alkIndivAge(WR2.key,~len,data=WR2.len) -# add length cat to length sample -WR2.len$LCat <- lencat(WR2.len$len,w=5) -head(WR2.len) +# apply the age-length key +WR2.len <- alkIndivAge(WR2.key,~len,data=WR2.len) +# add length cat to length sample +WR2.len$LCat <- lencat(WR2.len$len,w=5) +head(WR2.len) #> ID len age LCat #> 1 1 37 4 35 #> 2 2 37 4 35 @@ -275,27 +289,27 @@

    Examples

    #> 4 4 37 4 35 #> 7 7 42 4 40 #> 8 8 42 4 40 -# combine orig age & new ages -WR2.comb <- rbind(WR2.age, WR2.len) -Summarize(len~age,data=WR2.comb,digits=2) -#> age n mean sd min Q1 median Q3 max -#> 1 4 987 51.86 5.14 35 48 52.0 56.00 64 -#> 2 5 395 71.78 5.32 60 68 72.0 76.00 84 -#> 3 6 272 86.74 4.68 75 83 87.0 89.00 99 -#> 4 7 449 97.62 5.20 85 93 97.0 102.00 113 -#> 5 8 145 101.20 5.90 90 97 101.0 107.00 113 -#> 6 9 77 103.84 3.22 100 102 103.0 106.00 114 -#> 7 10 38 104.76 7.02 95 98 106.0 109.75 119 -#> 8 11 6 112.50 0.55 112 112 112.5 113.00 113 - -## Third Example -- Uneven breaks for length categories -WR3 <- WR79 -# set up uneven breaks -brks <- c(seq(35,100,5),110,130) -WR3$LCat <- lencat(WR3$len,breaks=brks) -WR3.age <- subset(WR3, !is.na(age)) -WR3.len <- subset(WR3, is.na(age)) -head(WR3.len) +# combine orig age & new ages +WR2.comb <- rbind(WR2.age, WR2.len) +Summarize(len~age,data=WR2.comb,digits=2) +#> age n mean sd min Q1 median Q3 max +#> 1 4 986 51.85 5.12 35 48.00 52.0 56.00 64 +#> 2 5 397 71.78 5.29 60 68.00 72.0 76.00 84 +#> 3 6 270 86.72 4.65 75 83.00 87.0 89.00 99 +#> 4 7 448 97.61 5.16 85 93.00 97.0 102.00 112 +#> 5 8 147 101.20 5.79 90 97.00 101.0 106.00 113 +#> 6 9 77 103.71 3.17 100 102.00 103.0 105.00 114 +#> 7 10 38 105.08 7.12 95 97.25 107.0 109.75 119 +#> 8 11 6 112.50 0.55 112 112.00 112.5 113.00 113 + +## Third Example -- Uneven breaks for length categories +WR3 <- WR79 +# set up uneven breaks +brks <- c(seq(35,100,5),110,130) +WR3$LCat <- lencat(WR3$len,breaks=brks) +WR3.age <- subset(WR3, !is.na(age)) +WR3.len <- subset(WR3, is.na(age)) +head(WR3.len) #> ID len age LCat #> 1 1 37 NA 35 #> 2 2 37 NA 35 @@ -303,8 +317,8 @@

    Examples

    #> 4 4 37 NA 35 #> 7 7 42 NA 40 #> 8 8 42 NA 40 -raw <- xtabs(~LCat+age,data=WR3.age) -( WR3.key <- prop.table(raw, margin=1) ) +raw <- xtabs(~LCat+age,data=WR3.age) +( WR3.key <- prop.table(raw, margin=1) ) #> age #> LCat 4 5 6 7 8 9 #> 35 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 @@ -339,11 +353,11 @@

    Examples

    #> 95 0.05882353 0.00000000 #> 100 0.04000000 0.00000000 #> 110 0.42857143 0.14285714 -WR3.len <- alkIndivAge(WR3.key,age~len,data=WR3.len,breaks=brks) +WR3.len <- alkIndivAge(WR3.key,age~len,data=WR3.len,breaks=brks) #> Warning: The maximum observed length in the length sample (117) is greater #> than the largest length category in the age-length key (110). #> The last length category will be treated as all-inclusive. -head(WR3.len) +head(WR3.len) #> ID len age LCat #> 1 1 37 4 35 #> 2 2 37 4 35 @@ -351,40 +365,36 @@

    Examples

    #> 4 4 37 4 35 #> 7 7 42 4 40 #> 8 8 42 4 40 -WR3.comb <- rbind(WR3.age, WR3.len) -Summarize(len~age,data=WR3.comb,digits=2) +WR3.comb <- rbind(WR3.age, WR3.len) +Summarize(len~age,data=WR3.comb,digits=2) #> age n mean sd min Q1 median Q3 max -#> 1 4 987 51.87 5.16 35 48 52 56 64 -#> 2 5 395 71.75 5.35 60 68 72 76 84 -#> 3 6 271 86.66 4.63 75 83 87 89 99 -#> 4 7 451 97.75 5.26 85 93 97 102 114 -#> 5 8 141 100.51 5.45 90 96 101 104 113 -#> 6 9 78 104.56 3.01 100 102 104 107 112 -#> 7 10 41 104.54 7.16 95 98 103 111 119 -#> 8 11 5 113.40 2.19 111 113 113 113 117 - +#> 1 4 986 51.86 5.15 35 48 52.0 56 64 +#> 2 5 396 71.70 5.31 60 68 72.0 76 84 +#> 3 6 270 86.79 4.63 75 83 87.0 89 99 +#> 4 7 450 97.72 5.35 85 93 97.0 102 112 +#> 5 8 142 100.41 5.57 90 97 100.5 104 113 +#> 6 9 81 104.59 3.30 100 102 104.0 107 113 +#> 7 10 40 104.38 7.31 95 97 102.5 112 119 +#> 8 11 4 113.00 2.83 111 111 112.0 114 117 +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/alkMeanVar.html b/docs/reference/alkMeanVar.html index d9135262..fc9a0e72 100644 --- a/docs/reference/alkMeanVar.html +++ b/docs/reference/alkMeanVar.html @@ -1,134 +1,146 @@ -Mean Values-at-age from an age-length key — alkMeanVar • FSAMean Values-at-age from an age-length key — alkMeanVar • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the mean value-at-age in a larger sample based on an age-length-key created from a subsample of ages through a two-stage random sampling design. The mean values could be mean length-, weight-, or fecundity-at-age, for example. The methods of Bettoli and Miranda (2001) or Quinn and Deriso (1999) are used. A standard deviation is computed for the Bettoli and Miranda (2001) method and standard error for the Quinn and Deriso (1999) method. See the testing section notes.

    -
    -
    alkMeanVar(
    -  key,
    -  formula,
    -  data,
    -  len.n,
    -  method = c("BettoliMiranda", "QuinnDeriso")
    -)
    +
    +

    Usage

    +
    alkMeanVar(
    +  key,
    +  formula,
    +  data,
    +  len.n,
    +  method = c("BettoliMiranda", "QuinnDeriso")
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    key

    A numeric matrix that contains the age-length key. See details.

    + +
    formula

    A formula of the form var~lencat+age where var generically represents the variable to be summarized (e.g., length, weight, fecundity), lencat generically represents the variable that contains the length intervals, and age generically represents the variable that contains the assigned ages.

    + +
    data

    A data.frame that minimally contains the length intervals, assessed ages, and the variable to be summarized (i.e., this should be the aged sample) as given in formula.

    + +
    len.n

    A vector of sample sizes for each length interval in the complete sample (i.e., all fish regardless of whether they were aged or not).

    + +
    method

    A string that indicates which method of calculation should be used. See details.

    +
    -
    -

    Value

    -

    A data.frame with as many rows as ages (columns) present in key and the following three variables:

    • age The ages.

    • +
      +

      Value

      + + +

      A data.frame with as many rows as ages (columns) present in key and the following three variables:

      • age The ages.

      • mean The mean value at each age.

      • sd,se The SD if method="BettoliMiranda" or SE of the mean if method="QuinnDeriso" for the value at each age.

      -
      -

      Details

      +
      +

      Details

      The age-length key key must have length intervals as rows and ages as columns. The row names of key (i.e., rownames(key)) must contain the minimum values of each length interval (e.g., if an interval is 100-109, then the corresponding row name must be 100). The column names of key (i.e., colnames(key)) must contain the age values (e.g., the columns can NOT be named with “age.1”, for example).

      The length intervals in the rows of key must contain all of the length intervals present in the larger sample. Thus, the length of len.n must, at least, equal the number of rows in key. If this constraint is not met, then the function will stop with an error message.

      Note that the function will stop with an error if the formula in formula does not meet the specific criteria outlined in the parameter list above.

      -
      -

      Testing

      +
      +

      Testing

      The results of these functions have not yet been rigorously tested. The Bettoli and Miranda (2001) results appear, at least, approximately correct when compared to the results from alkIndivAge. The Quinn and Deriso (1999) results appear at least approximately correct for the mean values, but do not appear to be correct for the SE values. Thus, a note is returned with the Quinn and Deriso (1999) results that the SE should not be trusted.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      5-Age-Length Key.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Bettoli, P. W. and Miranda, L. E. 2001. A cautionary note about estimating mean length at age with subsampled data. North American Journal of Fisheries Management, 21:425-428.

      Quinn, T. J. and R. B. Deriso. 1999. Quantitative Fish Dynamics. Oxford University Press, New York, New York. 542 pages

      -
      -

      See also

      +
      +

      See also

      See alkIndivAge and related functions for a completely different methodology. See alkAgeDist for a related method of determining the proportion of fish at each age. See the ALKr package.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## Example -- Even breaks for length categories
      -WR1 <- WR79
      -# add length intervals (width=5)
      -WR1$LCat <- lencat(WR1$len,w=5)
      -# get number of fish in each length interval in the entire sample
      -len.n <- xtabs(~LCat,data=WR1)
      -# isolate aged sample
      -WR1.age <- subset(WR1, !is.na(age))
      -# create age-length key
      -raw <- xtabs(~LCat+age,data=WR1.age)
      -( WR1.key <- prop.table(raw, margin=1) )
      +    
      +

      Examples

      +
      ## Example -- Even breaks for length categories
      +WR1 <- WR79
      +# add length intervals (width=5)
      +WR1$LCat <- lencat(WR1$len,w=5)
      +# get number of fish in each length interval in the entire sample
      +len.n <- xtabs(~LCat,data=WR1)
      +# isolate aged sample
      +WR1.age <- subset(WR1, !is.na(age))
      +# create age-length key
      +raw <- xtabs(~LCat+age,data=WR1.age)
      +( WR1.key <- prop.table(raw, margin=1) )
       #>      age
       #> LCat           4          5          6          7          8          9
       #>   35  1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
      @@ -167,10 +179,10 @@ 

      Examples

      #> 105 0.14285714 0.00000000 #> 110 0.20000000 0.20000000 #> 115 1.00000000 0.00000000 - -## use age-length key to estimate mean length-at-age of all fish -# Bettoli-Miranda method -alkMeanVar(WR1.key,len~LCat+age,WR1.age,len.n) + +## use age-length key to estimate mean length-at-age of all fish +# Bettoli-Miranda method +alkMeanVar(WR1.key,len~LCat+age,WR1.age,len.n) #> age mean sd #> 1 4 51.95492 5.050430 #> 2 5 72.01342 5.519135 @@ -180,9 +192,9 @@

      Examples

      #> 6 9 103.65599 2.312767 #> 7 10 104.86056 7.497515 #> 8 11 113.00000 0.000000 - -# Quinn-Deriso method -alkMeanVar(WR1.key,len~LCat+age,WR1.age,len.n,method="QuinnDeriso") + +# Quinn-Deriso method +alkMeanVar(WR1.key,len~LCat+age,WR1.age,len.n,method="QuinnDeriso") #> The 'se' values should not be trusted! #> age mean se #> 1 4 51.95492 0.3031374 @@ -193,29 +205,25 @@

      Examples

      #> 6 9 103.65599 0.9653842 #> 7 10 104.86056 0.6696853 #> 8 11 113.00000 0.0000000 - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/alkPlot.html b/docs/reference/alkPlot.html index 9315cd78..39e349da 100644 --- a/docs/reference/alkPlot.html +++ b/docs/reference/alkPlot.html @@ -1,121 +1,151 @@ -Plots to visualize age-length keys. — alkPlot • FSAPlots to visualize age-length keys. — alkPlot • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Various plots to visualize the proportion of fish of certain ages within length intervals in an age-length key.

    -
    -
    alkPlot(
    -  key,
    -  type = c("barplot", "area", "lines", "splines", "bubble"),
    -  xlab = "Length",
    -  ylab = ifelse(type != "bubble", "Proportion", "Age"),
    -  xlim = NULL,
    -  ylim = NULL,
    -  showLegend = FALSE,
    -  lbl.cex = 1.25,
    -  leg.cex = 1,
    -  lwd = 2,
    -  span = 0.25,
    -  grid = TRUE,
    -  col = NULL,
    -  buf = 0.45,
    -  add = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    alkPlot(
    +  key,
    +  type = c("barplot", "area", "lines", "splines", "bubble"),
    +  xlab = "Length",
    +  ylab = ifelse(type != "bubble", "Proportion", "Age"),
    +  xlim = NULL,
    +  ylim = NULL,
    +  showLegend = FALSE,
    +  lbl.cex = 1.25,
    +  leg.cex = 1,
    +  lwd = 2,
    +  span = 0.25,
    +  grid = TRUE,
    +  col = NULL,
    +  buf = 0.45,
    +  add = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    key

    A numeric matrix that contains the age-length key.

    + +
    type

    A string that indicates the type of plot to construct. See details.

    + +
    xlab, ylab

    A string that contains the label for the x- or y-axis.

    + +
    xlim, ylim

    A numeric of length 2 that provide the limits for the x-axis or y-axis.

    + +
    showLegend

    A logical that indicates whether a legend should be displayed (not implemented for type="bubble"). See examples.

    + +
    lbl.cex

    A numeric character expansion value for labels inside the bars when type="barplot" or on the lines when type="lines" or type="splines". Only used if showLegend=FALSE.

    + +
    leg.cex

    A numeric character expansion value for labels on the legend when showLegend=TRUE.

    + +
    lwd

    A numeric that indicates the line width when type="lines" or type="splines".

    + +
    span

    A numeric that indicates the span value to use in loess when type="splines".

    + +
    grid

    A logical that indicates whether a grid should be placed under the bubbles when type="bubble" or a character or appropriate vector that identifies a color for the grid. See examples.

    + +
    col

    A single character string that is a palette from hcl.pals or a vector of character strings containing colors for the bars, areas, lines, or spline lines of different ages; defaults to the "viridis" palette in hcl.colors. A single string that indicates the color of the bubbles when type="bubble".

    + +
    buf

    A single numeric that indicates the relative width of the bubbles when type="bubble". A value of 0.5 means that two full-width bubbles would touch each other either in the x- or y-direction (i.e., this would represent half of the minimum of the physical distance between values one-unit apart on the x- and y-axes). Set this to a value less than 0.5 so that the bubbles will not touch (the default is 0.45).

    + +
    add

    A logical that indicates whether the data should be added to an already existing plot. May be useful for visually comparing age-length keys. Only implemented when type="bubble".

    + +
    ...

    Additional arguments to pass to plot or barplot.

    +
    -
    -

    Value

    -

    None, but a plot is constructed.

    +
    +

    Value

    + + +

    None, but a plot is constructed.

    -
    -

    Details

    +
    +

    Details

    A variety of plots can be used to visualize the proportion of fish of certain ages within length intervals of an age-length key. The types of plots are described below and illustrated in the examples.

    • A “stacked” bar chart where vertical bars over length intervals sum to 1 but are segmented by the proportion of each age in that length interval is constructed with type="barplot". The ages will be labeled in the bar segments unless showLegend=TRUE is used.

    • A “stacked” area chart similar to the bar chart described above is constructed with type="area".

    • A plot with (differently colored) lines that connect the proportions of ages within each length interval is constructed with type="lines".

    • @@ -123,31 +153,31 @@

      Details

    • A “bubble” plot where circles whose size is proportional to the proportion of fish of each age in each length interval is constructed with type="bubble". The color of the bubbles can be controlled with col= and an underlying grid for ease of seeing the age and length interval for each bubble can be controlled with grid=. Bubbles from a second age-length key can be overlaid on an already constructed bubble plot by using add=TRUE in a second call to alkPlot.

    Note that all plots are “vertically conditional” -- i.e., each represents the proportional ages WITHIN each length interval.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    5-Age-Length Key.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See alkIndivAge for using an age-length key to assign ages to individual fish. See hcl.colors for a simple way to choose other colors.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Make an example age-length key
    -WR.age <- droplevels(subset(WR79, !is.na(age)))
    -WR.age$LCat <- lencat(WR.age$len,w=5)
    -raw <- xtabs(~LCat+age,data=WR.age)
    -WR.key <- prop.table(raw, margin=1)
    -round(WR.key,3)
    +    
    +

    Examples

    +
    ## Make an example age-length key
    +WR.age <- droplevels(subset(WR79, !is.na(age)))
    +WR.age$LCat <- lencat(WR.age$len,w=5)
    +raw <- xtabs(~LCat+age,data=WR.age)
    +WR.key <- prop.table(raw, margin=1)
    +round(WR.key,3)
     #>      age
     #> LCat      4     5     6     7     8     9    10    11
     #>   35  1.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
    @@ -167,51 +197,47 @@ 

    Examples

    #> 105 0.000 0.000 0.000 0.286 0.429 0.143 0.143 0.000 #> 110 0.000 0.000 0.000 0.200 0.200 0.200 0.200 0.200 #> 115 0.000 0.000 0.000 0.000 0.000 0.000 1.000 0.000 - -## Various visualizations of the age-length key -alkPlot(WR.key,"barplot") + +## Various visualizations of the age-length key +alkPlot(WR.key,"barplot") -alkPlot(WR.key,"barplot",col="Cork") +alkPlot(WR.key,"barplot",col="Cork") -alkPlot(WR.key,"barplot",col=heat.colors(8)) +alkPlot(WR.key,"barplot",col=heat.colors(8)) -alkPlot(WR.key,"barplot",showLegend=TRUE) +alkPlot(WR.key,"barplot",showLegend=TRUE) -alkPlot(WR.key,"area") +alkPlot(WR.key,"area") -alkPlot(WR.key,"lines") +alkPlot(WR.key,"lines") -alkPlot(WR.key,"splines") +alkPlot(WR.key,"splines") -alkPlot(WR.key,"splines",span=0.2) +alkPlot(WR.key,"splines",span=0.2) -alkPlot(WR.key,"bubble") +alkPlot(WR.key,"bubble") -alkPlot(WR.key,"bubble",col=col2rgbt("black",0.5)) +alkPlot(WR.key,"bubble",col=col2rgbt("black",0.5)) - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/binCI.html b/docs/reference/binCI.html index efcf9c56..299f723f 100644 --- a/docs/reference/binCI.html +++ b/docs/reference/binCI.html @@ -1,173 +1,183 @@ -Confidence intervals for binomial probability of success. — binCI • FSAConfidence intervals for binomial probability of success. — binCI • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Uses one of three methods to compute a confidence interval for the probability of success (p) in a binomial distribution.

    -
    -
    binCI(
    -  x,
    -  n,
    -  conf.level = 0.95,
    -  type = c("wilson", "exact", "asymptotic"),
    -  verbose = FALSE
    -)
    +
    +

    Usage

    +
    binCI(
    +  x,
    +  n,
    +  conf.level = 0.95,
    +  type = c("wilson", "exact", "asymptotic"),
    +  verbose = FALSE
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A single or vector of numbers that contains the number of observed successes.

    + +
    n

    A single or vector of numbers that contains the sample size.

    + +
    conf.level

    A single number that indicates the level of confidence (default is 0.95).

    + +
    type

    A string that identifies the type of method to use for the calculations. See details.

    + +
    verbose

    A logical that indicates whether x, n, and x/n should be included in the returned matrix (=TRUE) or not (=FALSE; DEFAULT).

    +
    -
    -

    Value

    -

    A #x2 matrix that contains the lower and upper confidence interval bounds as columns and, if verbose=TRUEx, n, and x/n .

    +
    +

    Value

    + + +

    A #x2 matrix that contains the lower and upper confidence interval bounds as columns and, if verbose=TRUE

    +

    +

    x, n, and x/n .

    -
    -

    Details

    +
    +

    Details

    This function will compute confidence interval for three possible methods chosen with the type argument.

    type="wilson"Wilson's (Journal of the American Statistical Association, 1927) confidence interval for a proportion. This is the score CI, based on inverting the asymptotic normal test using the null standard error.
    type="exact"Computes the Clopper/Pearson exact CI for a binomial success probability.
    type="asymptotic"This uses the normal distribution approximation.

    Note that Agresti and Coull (2000) suggest that the Wilson interval is the preferred method and is, thus, the default type.

    -
    -

    References

    +
    +

    References

    Agresti, A. and B.A. Coull. 1998. Approximate is better than “exact” for interval estimation of binomial proportions. American Statistician, 52:119-126.

    -
    -

    See also

    +
    +

    See also

    See binom.test; binconf in Hmisc; and functions in binom.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com, though this is largely based on binom.exact, binom.wilson, and binom.approx from the old epitools package.

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com, though this is largely based on binom.exact, binom.wilson, and binom.approx from the old epitools package.

    -
    -

    Examples

    -
    ## All types at once
    -binCI(7,20)
    +    
    +

    Examples

    +
    ## All types at once
    +binCI(7,20)
     #>              95% LCI   95% UCI
     #> Exact      0.1539092 0.5921885
     #> Wilson     0.1811918 0.5671457
     #> Asymptotic 0.1409627 0.5590373
    -
    -## Individual types
    -binCI(7,20,type="wilson")
    +
    +## Individual types
    +binCI(7,20,type="wilson")
     #>    95% LCI   95% UCI
     #>  0.1811918 0.5671457
    -binCI(7,20,type="exact")
    +binCI(7,20,type="exact")
     #>    95% LCI   95% UCI
     #>  0.1539092 0.5921885
    -binCI(7,20,type="asymptotic")
    +binCI(7,20,type="asymptotic")
     #>    95% LCI   95% UCI
     #>  0.1409627 0.5590373
    -binCI(7,20,type="asymptotic",verbose=TRUE)
    +binCI(7,20,type="asymptotic",verbose=TRUE)
     #>            x  n proportion   95% LCI   95% UCI
     #> Asymptotic 7 20       0.35 0.1409627 0.5590373
    -
    -## Multiple types
    -binCI(7,20,type=c("exact","asymptotic"))
    +
    +## Multiple types
    +binCI(7,20,type=c("exact","asymptotic"))
     #>              95% LCI   95% UCI
     #> Exact      0.1539092 0.5921885
     #> Asymptotic 0.1409627 0.5590373
    -binCI(7,20,type=c("exact","asymptotic"),verbose=TRUE)
    +binCI(7,20,type=c("exact","asymptotic"),verbose=TRUE)
     #>            x  n proportion   95% LCI   95% UCI
     #> Exact      7 20       0.35 0.1539092 0.5921885
     #> Asymptotic 7 20       0.35 0.1409627 0.5590373
    -
    -## Use with multiple inputs
    -binCI(c(7,10),c(20,30),type="wilson")
    +
    +## Use with multiple inputs
    +binCI(c(7,10),c(20,30),type="wilson")
     #>    95% LCI   95% UCI
     #>  0.1811918 0.5671457
     #>  0.1923050 0.5121995
    -binCI(c(7,10),c(20,30),type="wilson",verbose=TRUE)
    +binCI(c(7,10),c(20,30),type="wilson",verbose=TRUE)
     #>       x  n proportion   95% LCI   95% UCI
     #> [1,]  7 20  0.3500000 0.1811918 0.5671457
     #> [2,] 10 30  0.3333333 0.1923050 0.5121995
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/capHistConvert.html b/docs/reference/capHistConvert.html index 12499502..e5c0dcc7 100644 --- a/docs/reference/capHistConvert.html +++ b/docs/reference/capHistConvert.html @@ -1,110 +1,134 @@ -Convert between capture history data.frame formats. — capHistConvert • FSAConvert between capture history data.frame formats. — capHistConvert • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Use to convert between simple versions of several capture history data.frame formats -- “individual”, “frequency”, “event”, “MARK”, and “RMark”. The primary use is to convert to the “individual” format for use in capHistSum.

    -
    -
    capHistConvert(
    -  df,
    -  cols2use = NULL,
    -  cols2ignore = NULL,
    -  in.type = c("frequency", "event", "individual", "MARK", "marked", "RMark"),
    -  out.type = c("individual", "event", "frequency", "MARK", "marked", "RMark"),
    -  id = NULL,
    -  event.ord = NULL,
    -  freq = NULL,
    -  var.lbls = NULL,
    -  var.lbls.pre = "event",
    -  include.id = ifelse(is.null(id), FALSE, TRUE)
    -)
    +
    +

    Usage

    +
    capHistConvert(
    +  df,
    +  cols2use = NULL,
    +  cols2ignore = NULL,
    +  in.type = c("frequency", "event", "individual", "MARK", "marked", "RMark"),
    +  out.type = c("individual", "event", "frequency", "MARK", "marked", "RMark"),
    +  id = NULL,
    +  event.ord = NULL,
    +  freq = NULL,
    +  var.lbls = NULL,
    +  var.lbls.pre = "event",
    +  include.id = ifelse(is.null(id), FALSE, TRUE)
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    df

    A data.frame that contains the capture histories and, perhaps, a unique fish identifier or frequency variable. See details.

    + +
    cols2use

    A string or numeric vector that indicates columns in df to use. Negative numeric values will not use those columns. Cannot use both cols2use and col2ignore.

    + +
    cols2ignore

    A string or numeric vector that indicates columns in df to ignore. Typical columns to ignore are those that are not either in id= or freq= or part of the capture history data. Cannot use both cols2use and col2ignore.

    + +
    in.type

    A single string that indicates the type of capture history format to convert FROM.

    + +
    out.type

    A single string that indicates the type of capture history format to convert TO.

    + +
    id

    A string or numeric that indicates the column in df that contains the unique identifier for an individual fish. This argument is only used if in.type="event", in.type="individual", or, possibly, in.type="RMark".

    + +
    event.ord

    A string that contains a vector of ordered levels to be used when in.type="event". The default is to order alphabetically which may not be desirable if, for example, the events are labeled as ‘first’, ‘second’, ‘third’, and ‘fourth’. In this case, use event.ord=c("first","second","third","fourth").

    + +
    freq

    A string or numeric that indicates the column in df that contains the frequency of individual fish corresponding to a capture history. This argument is only used if in.type="MARK", in.type="frequency", or, possibly, in.type="RMark".

    + +
    var.lbls

    A string vector of labels for the columns that contain the returned individual or frequency capture histories. If var.lbls=NULL or the length is different then the number of events then default labels using var.lbls.pre will be used. This argument is only used if out.type="frequency" or out.type="individual".

    + +
    var.lbls.pre

    A single string used as a prefix for the labels of the columns that contain the returned individual or frequency capture histories. This prefix will be appended with a number corresponding to the sample event. This argument is only used if out.type="frequency" or out.type="individual" and will be ignored if a proper vector is given in var.lbls.

    + +
    include.id

    A logical that indicates whether a unique fish identifier variable/column should be included in the output data.frame. This argument is only used if out.type="individual" or out.type="RMark".

    +
    -
    -

    Value

    -

    A data frame of the proper type given in out.type is returned. See details.

    +
    +

    Value

    + + +

    A data frame of the proper type given in out.type is returned. See details.

    -
    -

    Details

    +
    +

    Details

    capHistSum requires capture histories to be recorded in the “individual” format. In this format, the data frame contains (at least) as many columns as sample events and as many rows as individually tagged fish. Optionally, the data.frame may also contain a column with unique fish identifiers (e.g., tag numbers). Each cell in the capture history portion of the data.frame contains a ‘0’ if the fish of that row was NOT seen in the event of that column and a ‘1’ if the fish of that row WAS seen in the event of that column. For example, suppose that five fish were marked on four sampling events; fish ‘17’ was captured on the first two events; fish ‘18’ was captured on the first and third events; fish ‘19’ was captured on only the third event; fish ‘20’ was captured on only the fourth event; and fish ‘21’ was captured on the first and second events. The “individual” capture history date.frame for these data looks like:

    fishevent1event2event3event4
    171100
    181010
    190010
    200001
    211100

    The “frequency” format data.frame (this format is used in Rcapture) has unique capture histories in separate columns, as in the “individual” format, but also includes a column with the frequency of individuals that had the capture history of that row. It will not contain a fish identifier variable. The same data from above looks like:

    event1event2event3event4freq
    11002
    10101
    00101
    00011

    The “event” format data.frame has a column with the unique fish identifier and a column with the event in which the fish of that row was observed. The same data from above looks like:

    @@ -113,36 +137,36 @@

    Details

    fishch
    171100
    181010
    190010
    200001
    211100

    However, if augmented with a frequency variable then the same data from above looks like:

    chfreq
    00011
    00101
    10101
    11002

    Each of the formats can be used to convert from (i.e., in in.type=) or to convert to (i.e., in out.type=) with the exception that only the individual fish identifier version can be converted to when out.type="RMark".

    -
    -

    Note

    +
    +

    Note

    The formats as used here are simple in the sense that one is only allowed to have the individual fish identifier or the frequency variable in addition to the capture history information. More complex analyses may use a number of covariates. For these more complex analyses, one should work directly with the Rcapture, RMark, or marked packages.

    This function also assumes that all unmarked captured fish are marked and returned to the population (i.e., no losses at the time of marking are allowed).

    -
    -

    Warning

    +
    +

    Warning

    capHistConvert may give unwanted results if the data are in.type="event" but there are unused levels for the variable, as would result if the data.frame had been subsetted on the event variable. The unwanted results can be corrected by using droplevels before capHistConvert. See the last example for an example.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    9-Abundance from Capture-Recapture Data.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See capHistSum to summarize “individual” capture histories into a format usable in mrClosed and mrOpen. Also see Rcapture, RMark, or marked packages for handling more complex analyses.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## A small example of 'event' format
    -( ex1 <- data.frame(fish=c(17,18,21,17,21,18,19,20),yr=c(1987,1987,1987,1988,1988,1989,1989,1990)) )
    +    
    +

    Examples

    +
    ## A small example of 'event' format
    +( ex1 <- data.frame(fish=c(17,18,21,17,21,18,19,20),yr=c(1987,1987,1987,1988,1988,1989,1989,1990)) )
     #>   fish   yr
     #> 1   17 1987
     #> 2   18 1987
    @@ -152,62 +176,62 @@ 

    Examples

    #> 6 18 1989 #> 7 19 1989 #> 8 20 1990 -# convert to 'individual' format -( ex1.E2I <- capHistConvert(ex1,id="fish",in.type="event") ) +# convert to 'individual' format +( ex1.E2I <- capHistConvert(ex1,id="fish",in.type="event") ) #> fish 1987 1988 1989 1990 #> 1 17 1 1 0 0 #> 2 18 1 0 1 0 #> 3 19 0 0 1 0 #> 4 20 0 0 0 1 #> 5 21 1 1 0 0 -# convert to 'frequency' format -( ex1.E2F <- capHistConvert(ex1,id="fish",in.type="event",out.type="frequency") ) +# convert to 'frequency' format +( ex1.E2F <- capHistConvert(ex1,id="fish",in.type="event",out.type="frequency") ) #> 1987 1988 1989 1990 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# convert to 'MARK' format -( ex1.E2M <- capHistConvert(ex1,id="fish",in.type="event",out.type="MARK") ) +# convert to 'MARK' format +( ex1.E2M <- capHistConvert(ex1,id="fish",in.type="event",out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# convert to 'RMark' format -( ex1.E2R <- capHistConvert(ex1,id="fish",in.type="event",out.type="RMark") ) +# convert to 'RMark' format +( ex1.E2R <- capHistConvert(ex1,id="fish",in.type="event",out.type="RMark") ) #> fish ch #> 1 17 1100 #> 2 18 1010 #> 3 19 0010 #> 4 20 0001 #> 5 21 1100 - -## convert converted 'individual' format ... -# to 'frequency' format (must ignore "id") -( ex1.I2F <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="frequency") ) + +## convert converted 'individual' format ... 
+# to 'frequency' format (must ignore "id") +( ex1.I2F <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="frequency") ) #> 1987 1988 1989 1990 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# to 'MARK' format -( ex1.I2M <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="MARK") ) +# to 'MARK' format +( ex1.I2M <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# to 'RMark' format -( ex1.I2R <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="RMark") ) +# to 'RMark' format +( ex1.I2R <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="RMark") ) #> fish ch #> 1 17 1100 #> 2 18 1010 #> 3 19 0010 #> 4 20 0001 #> 5 21 1100 -# to 'event' format -( ex1.I2E <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="event") ) +# to 'event' format +( ex1.I2E <- capHistConvert(ex1.E2I,id="fish",in.type="individual",out.type="event") ) #> fish event #> 1 17 1987 #> 2 18 1987 @@ -217,51 +241,51 @@

    Examples

    #> 6 18 1989 #> 7 19 1989 #> 8 20 1990 - -#' ## convert converted 'frequency' format ... -# to 'individual' format -( ex1.F2I <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency") ) + +#' ## convert converted 'frequency' format ... +# to 'individual' format +( ex1.F2I <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency") ) #> 1987 1988 1989 1990 #> 1 0 0 0 1 #> 2 0 0 1 0 #> 3 1 0 1 0 #> 4 1 1 0 0 #> 5 1 1 0 0 -( ex1.F2Ia <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency",include.id=TRUE) ) +( ex1.F2Ia <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency",include.id=TRUE) ) #> id 1987 1988 1989 1990 #> 1 1 0 0 0 1 #> 2 2 0 0 1 0 #> 3 3 1 0 1 0 #> 4 4 1 1 0 0 #> 5 5 1 1 0 0 -# to 'Mark' format -( ex1.F2M <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", - out.type="MARK") ) +# to 'Mark' format +( ex1.F2M <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", + out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# to 'RMark' format -( ex1.F2R <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", - out.type="RMark") ) +# to 'RMark' format +( ex1.F2R <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", + out.type="RMark") ) #> ch #> 1 0001 #> 2 0010 #> 3 1010 #> 4 1100 #> 5 1100 -( ex1.F2Ra <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", - out.type="RMark",include.id=TRUE) ) +( ex1.F2Ra <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", + out.type="RMark",include.id=TRUE) ) #> id ch #> 1 1 0001 #> 2 2 0010 #> 3 3 1010 #> 4 4 1100 #> 5 5 1100 -# to 'event' format -( ex1.F2E <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", - out.type="event") ) +# to 'event' format +( ex1.F2E <- capHistConvert(ex1.E2F,freq="freq",in.type="frequency", + out.type="event") ) #> id event #> 1 3 1987 #> 2 4 1987 @@ -271,47 +295,47 @@

    Examples

    #> 6 2 1989 #> 7 3 1989 #> 8 1 1990 - -## convert converted 'MARK' format ... -# to 'individual' format -( ex1.M2I <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK") ) + +## convert converted 'MARK' format ... +# to 'individual' format +( ex1.M2I <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK") ) #> event1 event2 event3 event4 #> 1 0 0 0 1 #> 2 0 0 1 0 #> 3 1 0 1 0 #> 4 1 1 0 0 #> 5 1 1 0 0 -( ex1.M2Ia <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",include.id=TRUE) ) +( ex1.M2Ia <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",include.id=TRUE) ) #> id event1 event2 event3 event4 #> 1 1 0 0 0 1 #> 2 2 0 0 1 0 #> 3 3 1 0 1 0 #> 4 4 1 1 0 0 #> 5 5 1 1 0 0 -# to 'frequency' format -( ex1.M2F <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="frequency") ) +# to 'frequency' format +( ex1.M2F <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="frequency") ) #> event1 event2 event3 event4 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# to 'RMark' format -( ex1.M2R <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="RMark") ) +# to 'RMark' format +( ex1.M2R <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="RMark") ) #> ch #> 1 0001 #> 2 0010 #> 3 1010 #> 4 1100 #> 5 1100 -( ex1.M2Ra <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="RMark",include.id=TRUE) ) +( ex1.M2Ra <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="RMark",include.id=TRUE) ) #> id ch #> 1 1 0001 #> 2 2 0010 #> 3 3 1010 #> 4 4 1100 #> 5 5 1100 -# to 'event' format -( ex1.M2E <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="event") ) +# to 'event' format +( ex1.M2E <- capHistConvert(ex1.E2M,freq="freq",in.type="MARK",out.type="event") ) #> id event #> 1 3 event1 #> 2 4 event1 @@ -321,32 +345,32 @@

    Examples

    #> 6 2 event3 #> 7 3 event3 #> 8 1 event4 - -## convert converted 'RMark' format ... -# to 'individual' format -( ex1.R2I <- capHistConvert(ex1.E2R,id="fish",in.type="RMark") ) + +## convert converted 'RMark' format ... +# to 'individual' format +( ex1.R2I <- capHistConvert(ex1.E2R,id="fish",in.type="RMark") ) #> fish event1 event2 event3 event4 #> 1 17 1 1 0 0 #> 2 18 1 0 1 0 #> 3 19 0 0 1 0 #> 4 20 0 0 0 1 #> 5 21 1 1 0 0 -# to 'frequency' format -( ex1.R2F <- capHistConvert(ex1.E2R,id="fish",in.type="RMark",out.type="frequency") ) +# to 'frequency' format +( ex1.R2F <- capHistConvert(ex1.E2R,id="fish",in.type="RMark",out.type="frequency") ) #> event1 event2 event3 event4 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# to 'MARK' format -( ex1.R2M <- capHistConvert(ex1.E2R,id="fish",in.type="RMark",out.type="MARK") ) +# to 'MARK' format +( ex1.R2M <- capHistConvert(ex1.E2R,id="fish",in.type="RMark",out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# to 'event' format -( ex1.R2E <- capHistConvert(ex1.E2R,id="fish",in.type="RMark",out.type="event") ) +# to 'event' format +( ex1.R2E <- capHistConvert(ex1.E2R,id="fish",in.type="RMark",out.type="event") ) #> fish event #> 1 17 event1 #> 2 18 event1 @@ -356,47 +380,47 @@

    Examples

    #> 6 18 event3 #> 7 19 event3 #> 8 20 event4 - -## Remove semi-colon from MARK format to make a RMark 'frequency' format -ex1.E2R1 <- ex1.E2M -ex1.E2R1$freq <- as.numeric(sub(";","",ex1.E2R1$freq)) -ex1.E2R1 + +## Remove semi-colon from MARK format to make a RMark 'frequency' format +ex1.E2R1 <- ex1.E2M +ex1.E2R1$freq <- as.numeric(sub(";","",ex1.E2R1$freq)) +ex1.E2R1 #> ch freq #> 1 0001 1 #> 2 0010 1 #> 3 1010 1 #> 4 1100 2 -# convert this to 'individual' format -( ex1.R2I1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark") ) +# convert this to 'individual' format +( ex1.R2I1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark") ) #> event1 event2 event3 event4 #> 1 0 0 0 1 #> 2 0 0 1 0 #> 3 1 0 1 0 #> 4 1 1 0 0 #> 5 1 1 0 0 -( ex1.R2I1a <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",include.id=TRUE) ) +( ex1.R2I1a <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",include.id=TRUE) ) #> id event1 event2 event3 event4 #> 1 1 0 0 0 1 #> 2 2 0 0 1 0 #> 3 3 1 0 1 0 #> 4 4 1 1 0 0 #> 5 5 1 1 0 0 -# convert this to 'frequency' format -( ex1.R2F1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",out.type="frequency") ) +# convert this to 'frequency' format +( ex1.R2F1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",out.type="frequency") ) #> event1 event2 event3 event4 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# convert this to 'MARK' format -( ex1.R2M1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",out.type="MARK") ) +# convert this to 'MARK' format +( ex1.R2M1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# convert this to 'event' format -( ex1.R2E1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",out.type="event") ) +# convert this to 'event' format +( ex1.R2E1 <- capHistConvert(ex1.E2R1,freq="freq",in.type="RMark",out.type="event") ) #> id event #> 1 3 event1 #> 2 4 event1 @@ -406,12 +430,12 @@

    Examples

    #> 6 2 event3 #> 7 3 event3 #> 8 1 event4 - - -######################################################################## -## A small example using character ids -( ex2 <- data.frame(fish=c("id17","id18","id21","id17","id21","id18","id19","id20"), - yr=c(1987,1987,1987,1988,1988,1989,1989,1990)) ) + + +######################################################################## +## A small example using character ids +( ex2 <- data.frame(fish=c("id17","id18","id21","id17","id21","id18","id19","id20"), + yr=c(1987,1987,1987,1988,1988,1989,1989,1990)) ) #> fish yr #> 1 id17 1987 #> 2 id18 1987 @@ -421,62 +445,62 @@

    Examples

    #> 6 id18 1989 #> 7 id19 1989 #> 8 id20 1990 -# convert to 'individual' format -( ex2.E2I <- capHistConvert(ex2,id="fish",in.type="event") ) +# convert to 'individual' format +( ex2.E2I <- capHistConvert(ex2,id="fish",in.type="event") ) #> fish 1987 1988 1989 1990 #> 1 id17 1 1 0 0 #> 2 id18 1 0 1 0 #> 3 id19 0 0 1 0 #> 4 id20 0 0 0 1 #> 5 id21 1 1 0 0 -# convert to 'frequency' format -( ex2.E2F <- capHistConvert(ex2,id="fish",in.type="event",out.type="frequency") ) +# convert to 'frequency' format +( ex2.E2F <- capHistConvert(ex2,id="fish",in.type="event",out.type="frequency") ) #> 1987 1988 1989 1990 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# convert to 'MARK' format -( ex2.E2M <- capHistConvert(ex2,id="fish",in.type="event",out.type="MARK") ) +# convert to 'MARK' format +( ex2.E2M <- capHistConvert(ex2,id="fish",in.type="event",out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# convert to 'RMark' format -( ex2.E2R <- capHistConvert(ex2,id="fish",in.type="event",out.type="RMark") ) +# convert to 'RMark' format +( ex2.E2R <- capHistConvert(ex2,id="fish",in.type="event",out.type="RMark") ) #> fish ch #> 1 id17 1100 #> 2 id18 1010 #> 3 id19 0010 #> 4 id20 0001 #> 5 id21 1100 - -## convert converted 'individual' format ... -# to 'frequency' format -( ex2.I2F <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="frequency") ) + +## convert converted 'individual' format ... 
+# to 'frequency' format +( ex2.I2F <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="frequency") ) #> 1987 1988 1989 1990 freq #> 1 0 0 0 1 1 #> 2 0 0 1 0 1 #> 3 1 0 1 0 1 #> 4 1 1 0 0 2 -# to 'MARK' format -( ex2.I2M <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="MARK") ) +# to 'MARK' format +( ex2.I2M <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="MARK") ) #> ch freq #> 1 0001 1; #> 2 0010 1; #> 3 1010 1; #> 4 1100 2; -# to 'RMark' format -( ex2.I2R <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="RMark") ) +# to 'RMark' format +( ex2.I2R <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="RMark") ) #> fish ch #> 1 id17 1100 #> 2 id18 1010 #> 3 id19 0010 #> 4 id20 0001 #> 5 id21 1100 -# to 'event' format -( ex2.I2E <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="event") ) +# to 'event' format +( ex2.I2E <- capHistConvert(ex2.E2I,id="fish",in.type="individual",out.type="event") ) #> fish event #> 1 id17 1987 #> 2 id18 1987 @@ -486,26 +510,26 @@

    Examples

    #> 6 id18 1989 #> 7 id19 1989 #> 8 id20 1990 - -## demo use of var.lbls -( ex2.E2Ia <- capHistConvert(ex2,id="fish",in.type="event",var.lbls.pre="Sample") ) + +## demo use of var.lbls +( ex2.E2Ia <- capHistConvert(ex2,id="fish",in.type="event",var.lbls.pre="Sample") ) #> fish 1987 1988 1989 1990 #> 1 id17 1 1 0 0 #> 2 id18 1 0 1 0 #> 3 id19 0 0 1 0 #> 4 id20 0 0 0 1 #> 5 id21 1 1 0 0 -( ex2.E2Ib <- capHistConvert(ex2,id="fish",in.type="event", - var.lbls=c("first","second","third","fourth")) ) +( ex2.E2Ib <- capHistConvert(ex2,id="fish",in.type="event", + var.lbls=c("first","second","third","fourth")) ) #> fish first second third fourth #> 1 id17 1 1 0 0 #> 2 id18 1 0 1 0 #> 3 id19 0 0 1 0 #> 4 id20 0 0 0 1 #> 5 id21 1 1 0 0 - -## demo use of event.ord -( ex2.I2Ea <- capHistConvert(ex2.E2Ib,id="fish",in.type="individual",out.type="event") ) + +## demo use of event.ord +( ex2.I2Ea <- capHistConvert(ex2.E2Ib,id="fish",in.type="individual",out.type="event") ) #> fish event #> 1 id17 first #> 2 id18 first @@ -515,73 +539,73 @@

    Examples

    #> 6 id18 third #> 7 id19 third #> 8 id20 fourth -( ex2.E2Ibad <- capHistConvert(ex2.I2Ea,id="fish",in.type="event") ) +( ex2.E2Ibad <- capHistConvert(ex2.I2Ea,id="fish",in.type="event") ) #> fish first fourth second third #> 1 id17 1 0 1 0 #> 2 id18 1 0 0 1 #> 3 id19 0 0 0 1 #> 4 id20 0 1 0 0 #> 5 id21 1 0 1 0 -( ex2.E2Igood <- capHistConvert(ex2.I2Ea,id="fish",in.type="event", - event.ord=c("first","second","third","fourth")) ) +( ex2.E2Igood <- capHistConvert(ex2.I2Ea,id="fish",in.type="event", + event.ord=c("first","second","third","fourth")) ) #> fish first second third fourth #> 1 id17 1 1 0 0 #> 2 id18 1 0 1 0 #> 3 id19 0 0 1 0 #> 4 id20 0 0 0 1 #> 5 id21 1 1 0 0 - -## ONLY RUN IN INTERACTIVE MODE -if (interactive()) { - -######################################################################## -## A larger example of 'frequency' format (data from Rcapture package) -data(bunting,package="Rcapture") -head(bunting) -# convert to 'individual' format -bun.F2I <- capHistConvert(bunting,in.type="frequency",freq="freq") -head(bun.F2I) -# convert to 'MARK' format -bun.F2M <- capHistConvert(bunting,id="id",in.type="frequency",freq="freq",out.type="MARK") -head(bun.F2M) -# convert converted 'individual' back to 'MARK' format -bun.I2M <- capHistConvert(bun.F2I,id="id",in.type="individual",out.type="MARK") -head(bun.I2M) -# convert converted 'individual' back to 'frequency' format -bun.I2F <- capHistConvert(bun.F2I,id="id",in.type="individual", - out.type="frequency",var.lbls.pre="Sample") -head(bun.I2F) - - -######################################################################## -## A larger example of 'marked' or 'RMark' format, but with a covariate -## and when the covariate is removed there is no frequency or individual -## fish identifier. 
-data(dipper,package="marked") -head(dipper) -# isolate males and females -dipperF <- subset(dipper,sex=="Female") -dipperM <- subset(dipper,sex=="Male") -# convert females to 'individual' format -dipF.R2I <- capHistConvert(dipperF,cols2ignore="sex",in.type="RMark") -head(dipF.R2I) -# convert males to 'individual' format -dipM.R2I <- capHistConvert(dipperM,cols2ignore="sex",in.type="RMark") -head(dipM.R2I) -# add sex variable to each data.frame and then combine -dipF.R2I$sex <- "Female" -dipM.R2I$sex <- "Male" -dip.R2I <- rbind(dipF.R2I,dipM.R2I) -head(dip.R2I) -tail(dip.R2I) - -} # end interactive - - -## An example of problem with unused levels -## Create a set of test data with several groups -( df <- data.frame(fish=c("id17","id18","id21","id17","id21","id18","id19","id20","id17"), - group=c("B1","B1","B1","B2","B2","B3","B4","C1","C1")) ) + +## ONLY RUN IN INTERACTIVE MODE +if (interactive()) { + +######################################################################## +## A larger example of 'frequency' format (data from Rcapture package) +data(bunting,package="Rcapture") +head(bunting) +# convert to 'individual' format +bun.F2I <- capHistConvert(bunting,in.type="frequency",freq="freq") +head(bun.F2I) +# convert to 'MARK' format +bun.F2M <- capHistConvert(bunting,id="id",in.type="frequency",freq="freq",out.type="MARK") +head(bun.F2M) +# convert converted 'individual' back to 'MARK' format +bun.I2M <- capHistConvert(bun.F2I,id="id",in.type="individual",out.type="MARK") +head(bun.I2M) +# convert converted 'individual' back to 'frequency' format +bun.I2F <- capHistConvert(bun.F2I,id="id",in.type="individual", + out.type="frequency",var.lbls.pre="Sample") +head(bun.I2F) + + +######################################################################## +## A larger example of 'marked' or 'RMark' format, but with a covariate +## and when the covariate is removed there is no frequency or individual +## fish identifier. 
+data(dipper,package="marked") +head(dipper) +# isolate males and females +dipperF <- subset(dipper,sex=="Female") +dipperM <- subset(dipper,sex=="Male") +# convert females to 'individual' format +dipF.R2I <- capHistConvert(dipperF,cols2ignore="sex",in.type="RMark") +head(dipF.R2I) +# convert males to 'individual' format +dipM.R2I <- capHistConvert(dipperM,cols2ignore="sex",in.type="RMark") +head(dipM.R2I) +# add sex variable to each data.frame and then combine +dipF.R2I$sex <- "Female" +dipM.R2I$sex <- "Male" +dip.R2I <- rbind(dipF.R2I,dipM.R2I) +head(dip.R2I) +tail(dip.R2I) + +} # end interactive + + +## An example of problem with unused levels +## Create a set of test data with several groups +( df <- data.frame(fish=c("id17","id18","id21","id17","id21","id18","id19","id20","id17"), + group=c("B1","B1","B1","B2","B2","B3","B4","C1","C1")) ) #> fish group #> 1 id17 B1 #> 2 id18 B1 @@ -592,8 +616,8 @@

    Examples

    #> 7 id19 B4 #> 8 id20 C1 #> 9 id17 C1 -# Let's assume the user wants to subset the data from the "B" group -( df1 <- subset(df,group %in% c("B1","B2","B3","B4")) ) +# Let's assume the user wants to subset the data from the "B" group +( df1 <- subset(df,group %in% c("B1","B2","B3","B4")) ) #> fish group #> 1 id17 B1 #> 2 id18 B1 @@ -602,45 +626,41 @@

    Examples

    #> 5 id21 B2 #> 6 id18 B3 #> 7 id19 B4 -# Looks like capHistConvert() is still using the unused factor -# level from group C -capHistConvert(df1,id="fish",in.type="event") +# Looks like capHistConvert() is still using the unused factor +# level from group C +capHistConvert(df1,id="fish",in.type="event") #> fish B1 B2 B3 B4 #> 1 id17 1 1 0 0 #> 2 id18 1 0 1 0 #> 3 id19 0 0 0 1 #> 4 id21 1 1 0 0 -# use droplevels() to remove the unused groups and no problem -df1 <- droplevels(df1) -capHistConvert(df1,id="fish",in.type="event") +# use droplevels() to remove the unused groups and no problem +df1 <- droplevels(df1) +capHistConvert(df1,id="fish",in.type="event") #> fish B1 B2 B3 B4 #> 1 id17 1 1 0 0 #> 2 id18 1 0 1 0 #> 3 id19 0 0 0 1 #> 4 id21 1 1 0 0 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/capHistSum.html b/docs/reference/capHistSum.html index b697fb0b..83fb5219 100644 --- a/docs/reference/capHistSum.html +++ b/docs/reference/capHistSum.html @@ -1,135 +1,159 @@ -Summarize capture histories in individual fish format. — capHistSum • FSASummarize capture histories in individual fish format. — capHistSum • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Use to summarize a capture history data file that is in the “individual” fish format (see capHistConvert for a discussion of data file format types). Summarized capture history results may be used in the Lincoln-Petersen, Schnabel, Schumacher-Eschmeyer, or Jolly-Seber methods for estimating population abundance (see mrClosed and mrOpen).

    -
    -
    capHistSum(df, cols2use = NULL, cols2ignore = NULL)
    -
    -is.CapHist(x)
    -
    -# S3 method for CapHist
    -plot(x, what = c("u", "f"), pch = 19, cex.pch = 0.7, lwd = 1, ...)
    +
    +

    Usage

    +
    capHistSum(df, cols2use = NULL, cols2ignore = NULL)
    +
    +is.CapHist(x)
    +
    +# S3 method for CapHist
    +plot(x, what = c("u", "f"), pch = 19, cex.pch = 0.7, lwd = 1, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    df

    A data.frame that contains the capture histories (and, perhaps, other information) in “individual” fish format. See details.

    + +
    cols2use

    A string or numeric vector that indicates columns in df that contain the capture histories. Negative numeric values will not use those columns. Cannot use both cols2use and col2ignore. See details.

    + +
    cols2ignore

    A string or numeric vector that indicates columns in df that do not contain the capture histories and should be ignored. Cannot use both cols2use and col2ignore.

    + +
    x

    An object from capHistSum.

    + +
    what

    A string that indicates what type of diagnostic plot to construct with plot. See details.

    + +
    pch

    A numeric that indicates the plotting character for the diagnostic plot.

    + +
    cex.pch

    A numeric that indicates the character expansion value for the plotting characters in the diagnostic plot. The default is to be “slightly smaller” (i.e., cex.pch=0.7).

    + +
    lwd

    A numeric that indicates the line width in the diagnostic plot.

    + +
    ...

    Optional arguments to send to plot.

    +
    -
    -

    Value

    -

    If the capture history data file represents only two samples, then a list with the following two components is returned.

    • caphist A vector summarizing the frequency of fish with each capture history.

    • +
      +

      Value

      + + +

      If the capture history data file represents only two samples, then a list with the following two components is returned.

      +

      +
      • caphist A vector summarizing the frequency of fish with each capture history.

      • sum A data.frame that contains the number of marked fish from the first sample (M), the number of captured fish in the second sample (n), and the number of recaptured (i.e. previously marked) fish in the second sample (m).

      • -

      If the capture history data file represents more than two samples, then a list with the following five components is returned

      • caphist A vector summarizing the frequency of fish with each capture history.

      • +

      If the capture history data file represents more than two samples, then a list with the following five components is returned

      +

      +
      • caphist A vector summarizing the frequency of fish with each capture history.

      • sum A data frame that contains the the number of captured fish in the ith sample (n), the number of recaptured (i.e. previously marked) fish in the ith sample (m), the number of marked fish returned to the population following the ith sample (R; this will equal n as the function currently does not handle mortalities); the number of marked fish in the population prior to the ith sample (M); the number of fish first seen in the ith sample (u); the number of fish last seen in the ith sample (v); and the number of fish seen i times (f).

      • methodB.top A matrix that contains the top of the Method B table used for the Jolly-Seber method (i.e., a contingency table of capture sample (columns) and last seen sample (rows)).

      • methodB.bot A data.frame that contains the bottom of the Method B table used for the Jolly-Seber method (i.e., the number of marked fish in the sample (m), the number of unmarked fish in the sample (u), the total number of fish in the sample (n), and the number of marked fish returned to the population following the sample (R).

      • m.array A matrix that contains the the so-called “m-array”. The first column contains the number of fish captured on the ith event. The columns labeled with “cX” prefix show the number of fish originally captured in the ith row that were captured in the Xth event. The last column shows the number of fish originally captured in the ith row that were never recaptured.

      -
      -

      Details

      +
      +

      Details

      This function requires the capture history data file to be in the “individual” fish format. See capHistConvert for a description of this (and other) formats and for methods to convert from other formats to the “individual” fish format. In addition, this function requires only the capture history portion of the data file. Thus, if df contains columns with non-capture history information (e.g., fish ID, length, location, etc.) then use cols2use= to identify which columns contain only the capture history information. Columns to use can be identified by listing the column numbers (e.g., columns 2 through 7 could be included with cols2use=2:7). In many instances it may be easier to identify columns to exclude which can be done by preceding the column number by a negative sign (e.g., columns 1 through 3 are excluded with cols2use=-(1:3)).

      The object returned from this function can be used directly in mrClosed and mrOpen. See examples of this functionality on the help pages for those functions.

      The plot function can be used to construct the two diagnostic plots described by Baillargeon and Rivest (2007). The what="f" plot will plot the log of the number of fish seen i times divided by choose(t,i) against i. The what="u" plot will plot the log of the number of fish seen for the first time on event i against i. Baillargeon and Rivest (2007) provide a table that can be used to diagnosed types of heterogeneities in capture probabilities from these plots.

      -
      -

      Note

      +
      +

      Note

      This function assumes that all unmarked captured fish are marked and returned to the population (i.e., no losses at the time of marking are allowed).

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      9-Abundance from Capture-Recapture Data.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Baillargeon, S. and Rivest, L.-P. (2007). Rcapture: Loglinear models for capture-recapture in R. Journal of Statistical Software, 19(5):1-31.

      -
      -

      See also

      +
      +

      See also

      See descriptive in Rcapture for m.array and some of the same values in sum. See capHistConvert for a descriptions of capture history data file formats and how to convert between them. See mrClosed and mrOpen for how to estimate abundance from the summarized capture history information.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      # data.frame with IDs in the first column
      -head(PikeNYPartial1)
      +    
      +

      Examples

      +
      # data.frame with IDs in the first column
      +head(PikeNYPartial1)
       #>     id first second third fourth
       #> 1 2001     1      0     0      0
       #> 2 2002     1      0     0      0
      @@ -137,9 +161,9 @@ 

      Examples

      #> 4 2004 1 0 0 0 #> 5 2005 1 0 0 0 #> 6 2006 1 0 0 0 - -# Three ways to ignore first column of ID numbers -( ch1 <- capHistSum(PikeNYPartial1,cols2use=-1) ) + +# Three ways to ignore first column of ID numbers +( ch1 <- capHistSum(PikeNYPartial1,cols2use=-1) ) #> $caphist #> #> 0001 0010 0011 0100 0101 0110 1000 1001 1010 1100 @@ -175,7 +199,7 @@

      Examples

      #> #> attr(,"class") #> [1] "CapHist" -( ch1 <- capHistSum(PikeNYPartial1,cols2ignore=1) ) +( ch1 <- capHistSum(PikeNYPartial1,cols2ignore=1) ) #> $caphist #> #> 0001 0010 0011 0100 0101 0110 1000 1001 1010 1100 @@ -211,7 +235,7 @@

      Examples

      #> #> attr(,"class") #> [1] "CapHist" -( ch1 <- capHistSum(PikeNYPartial1,cols2ignore="id") ) +( ch1 <- capHistSum(PikeNYPartial1,cols2ignore="id") ) #> $caphist #> #> 0001 0010 0011 0100 0101 0110 1000 1001 1010 1100 @@ -247,17 +271,17 @@

      Examples

      #> #> attr(,"class") #> [1] "CapHist" - -# diagnostic plots -plot(ch1) + +# diagnostic plots +plot(ch1) -plot(ch1,what="f") +plot(ch1,what="f") -plot(ch1,what="u") +plot(ch1,what="u") - -# An examle with only two sample events (for demonstration only) -( ch2 <- capHistSum(PikeNYPartial1,cols2use=-c(1,4:5)) ) + +# An examle with only two sample events (for demonstration only) +( ch2 <- capHistSum(PikeNYPartial1,cols2use=-c(1,4:5)) ) #> $caphist #> #> 00 01 10 11 @@ -269,7 +293,7 @@

      Examples

      #> #> attr(,"class") #> [1] "CapHist" -( ch2 <- capHistSum(PikeNYPartial1,cols2use=2:3) ) +( ch2 <- capHistSum(PikeNYPartial1,cols2use=2:3) ) #> $caphist #> #> 00 01 10 11 @@ -281,7 +305,7 @@

      Examples

      #> #> attr(,"class") #> [1] "CapHist" -( ch2 <- capHistSum(PikeNYPartial1,cols2ignore=c(1,4:5)) ) +( ch2 <- capHistSum(PikeNYPartial1,cols2ignore=c(1,4:5)) ) #> $caphist #> #> 00 01 10 11 @@ -293,29 +317,25 @@

      Examples

      #> #> attr(,"class") #> [1] "CapHist" - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/catchCurve.html b/docs/reference/catchCurve.html index 3e2c1441..196230b5 100644 --- a/docs/reference/catchCurve.html +++ b/docs/reference/catchCurve.html @@ -1,255 +1,305 @@ -Mortality estimates from the descending limb of a catch curve. — catchCurve • FSAMortality estimates from the descending limb of a catch curve. — catchCurve • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Fits a linear model to the user-defined descending limb of a catch curve. Method functions extract estimates of the instantaneous (Z) and total annual (A) mortality rates with associated standard errors and confidence intervals. A plot method highlights the descending limb, shows the linear model on the descending limb, and, optionally, prints the estimated Z and A.

    -
    -
    catchCurve(x, ...)
    -
    -# S3 method for default
    -catchCurve(
    -  x,
    -  catch,
    -  ages2use = age,
    -  weighted = FALSE,
    -  negWeightReplace = 0,
    -  ...
    -)
    -
    -# S3 method for formula
    -catchCurve(
    -  x,
    -  data,
    -  ages2use = age,
    -  weighted = FALSE,
    -  negWeightReplace = 0,
    -  ...
    -)
    -
    -# S3 method for catchCurve
    -summary(object, parm = c("both", "all", "Z", "A", "lm"), ...)
    -
    -# S3 method for catchCurve
    -coef(object, parm = c("all", "both", "Z", "A", "lm"), ...)
    -
    -# S3 method for catchCurve
    -anova(object, ...)
    -
    -# S3 method for catchCurve
    -confint(
    -  object,
    -  parm = c("all", "both", "Z", "A", "lm"),
    -  level = conf.level,
    -  conf.level = 0.95,
    -  ...
    -)
    -
    -# S3 method for catchCurve
    -rSquared(object, digits = getOption("digits"), percent = FALSE, ...)
    -
    -# S3 method for catchCurve
    -plot(
    -  x,
    -  pos.est = "topright",
    -  cex.est = 0.95,
    -  round.est = c(3, 1),
    -  ylab = "log(Catch)",
    -  xlab = "Age",
    -  ylim = NULL,
    -  col.pt = "gray30",
    -  col.mdl = "black",
    -  lwd = 2,
    -  lty = 1,
    -  ...
    -)
    +
    +

    Usage

    +
    catchCurve(x, ...)
    +
    +# S3 method for default
    +catchCurve(
    +  x,
    +  catch,
    +  ages2use = age,
    +  weighted = FALSE,
    +  negWeightReplace = 0,
    +  ...
    +)
    +
    +# S3 method for formula
    +catchCurve(
    +  x,
    +  data,
    +  ages2use = age,
    +  weighted = FALSE,
    +  negWeightReplace = 0,
    +  ...
    +)
    +
    +# S3 method for catchCurve
    +summary(object, parm = c("both", "all", "Z", "A", "lm"), ...)
    +
    +# S3 method for catchCurve
    +coef(object, parm = c("all", "both", "Z", "A", "lm"), ...)
    +
    +# S3 method for catchCurve
    +anova(object, ...)
    +
    +# S3 method for catchCurve
    +confint(
    +  object,
    +  parm = c("all", "both", "Z", "A", "lm"),
    +  level = conf.level,
    +  conf.level = 0.95,
    +  ...
    +)
    +
    +# S3 method for catchCurve
    +rSquared(object, digits = getOption("digits"), percent = FALSE, ...)
    +
    +# S3 method for catchCurve
    +plot(
    +  x,
    +  pos.est = "topright",
    +  cex.est = 0.95,
    +  round.est = c(3, 1),
    +  ylab = "log(Catch)",
    +  xlab = "Age",
    +  ylim = NULL,
    +  col.pt = "gray30",
    +  col.mdl = "black",
    +  lwd = 2,
    +  lty = 1,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numerical vector of assigned ages in the catch curve or a formula of the form catch~age when used in catchCurve. An object saved from catchCurve (i.e., of class catchCurve) when used in the methods.

    + +
    ...

    Additional arguments for methods.

    + +
    catch

    A numerical vector of catches or CPUEs for the ages in the catch curve. Not used if x is a formula.

    + +
    ages2use

    A numerical vector of ages that define the descending limb of the catch curve.

    + +
    weighted

    A logical that indicates whether a weighted regression should be used. See details.

    + +
    negWeightReplace

    A single non-negative numeric that will replace negative weights (defaults to 0). Only used when weighted=TRUE. See details.

    + +
    data

    A data.frame from which the variables in the x formula can be found. Not used if x is not a formula.

    + +
    object

    An object saved from the catchCurve call (i.e., of class catchCurve).

    + +
    parm

    A numeric or string (of parameter names) vector that specifies which parameters are to be given confidence intervals. If parm="lm" then confidence intervals for the underlying linear model are returned.

    + +
    level

    Same as conf.level. Used for compatibility with the generic confint function.

    + +
    conf.level

    A number representing the level of confidence to use for constructing confidence intervals.

    + +
    digits

    The number of digits to round the rSquared result to.

    + +
    percent

    A logical that indicates if the rSquared result should be returned as a percentage (=TRUE) or as a proportion (=FALSE; default).

    + +
    pos.est

    A string to identify where to place the estimated mortality rates on the plot. Can be set to one of "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" or "center" for positioning the estimated mortality rates on the plot. Typically "bottomleft" (DEFAULT) and "topright" will be “out-of-the-way” placements. Set pos.est to NULL to remove the estimated mortality rates from the plot.

    + +
    cex.est

    A single numeric character expansion value for the estimated mortality rates on the plot.

    + +
    round.est

    A numeric that indicates the number of decimal place to which Z (first value) and A (second value) should be rounded. If only one value then it will be used for both Z and A.

    + +
    ylab

    A label for the y-axis ("log(Catch)" is the default).

    + +
    xlab

    A label for the x-axis ("Age" is the default).

    + +
    ylim

    A numeric for the limits of the y-axis. If NULL then will default to a minimum of 0 or the lowest negative log catch and a maximum of the maximum log catch. If a single value then it will be the maximum of the y-axis. If two values then these will the minimum and maximum values of the y-axis.

    + +
    col.pt

    A string that indicates the color of the plotted points.

    + +
    col.mdl

    A string that indicates the color of the fitted line.

    + +
    lwd

    A numeric that indicates the line width of the fitted line.

    + +
    lty

    A numeric that indicates the type of line used for the fitted line.

    +
    -
    -

    Value

    -

    A list that contains the following items:

    • age The original vector of assigned ages.

    • +
      +

      Value

      + + +

      A list that contains the following items:

      +

      +
      • age The original vector of assigned ages.

      • catch The original vector of observed catches or CPUEs.

      • age.e A vector of assigned ages for which the catch curve was fit.

      • log.catch.e A vector of log catches or CPUEs for which the catch curve was fit.

      • W A vector of weights used in the catch curve fit. Will be NULL unless weighted=TRUE.

      • lm An lm object from the fit to the ages and log catches or CPUEs on the descending limb (i.e., in age.e and log.catch.e).

      -
      -

      Details

      +
      +

      Details

      The default is to use all ages in the age vector. This is appropriate only when the age and catch vectors contain only the ages and catches on the descending limb of the catch curve. Use ages2use to isolate only the catch and ages on the descending limb.

      If weighted=TRUE then a weighted regression is used where the weights are the log(number) at each age predicted from the unweighted regression of log(number) on age (as proposed by Maceina and Bettoli (1998)). If a negative weight is computed it will be changed to the value in negWeightReplace and a warning will be issued.

      -
      -

      Testing

      +
      +

      Testing

      Tested the results of catch curve, both unweighted and weighted, against the results in Miranda and Bettoli (2007). Results for Z and the SE of Z matched perfectly. Tested the unweighted results against the results from agesurv in fishmethods using the rockbass data.frame in fishmethods. Results for Z and the SE of Z matched perfectly.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      11-Mortality.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Maceina, M.J., and P.W. Bettoli. 1998. Variation in Largemouth Bass recruitment in four mainstream impoundments on the Tennessee River. North American Journal of Fisheries Management 18:998-1003.

      Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]

      -
      -

      See also

      +
      +

      See also

      See agesurv in fishmethods for similar functionality. See chapmanRobson and agesurvcl in fishmethods for alternative methods to estimate mortality rates. See metaM for empirical methods to estimate natural mortality.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      plot(catch~age,data=BrookTroutTH,pch=19)
      +    
      +

      Examples

      +
      plot(catch~age,data=BrookTroutTH,pch=19)
       
      -
      -## demonstration of formula notation
      -cc1 <- catchCurve(catch~age,data=BrookTroutTH,ages2use=2:6)
      -summary(cc1)
      +
      +## demonstration of formula notation
      +cc1 <- catchCurve(catch~age,data=BrookTroutTH,ages2use=2:6)
      +summary(cc1)
       #>    Estimate Std. Error  t value   Pr(>|t|)
       #> Z  0.659987   0.136741 4.826549 0.01695159
       #> A 48.314197         NA       NA         NA
      -cbind(Est=coef(cc1),confint(cc1))
      +cbind(Est=coef(cc1),confint(cc1))
       #>         Est    95% LCI   95% UCI
       #> Z  0.659987  0.2248162  1.095158
       #> A 48.314197 20.1337012 66.551321
      -rSquared(cc1)
      +rSquared(cc1)
       #> [1] 0.8859124
      -plot(cc1)
      -summary(cc1,parm="Z")
      +plot(cc1)
      +summary(cc1,parm="Z")
       #>   Estimate Std. Error  t value   Pr(>|t|)
       #> Z 0.659987   0.136741 4.826549 0.01695159
      -cbind(Est=coef(cc1,parm="Z"),confint(cc1,parm="Z"))
      +cbind(Est=coef(cc1,parm="Z"),confint(cc1,parm="Z"))
       #>        Est   95% LCI  95% UCI
       #> Z 0.659987 0.2248162 1.095158
      -
      -## demonstration of excluding ages2use
      -cc2 <- catchCurve(catch~age,data=BrookTroutTH,ages2use=-c(0,1))
      -summary(cc2)
      +
      +## demonstration of excluding ages2use
      +cc2 <- catchCurve(catch~age,data=BrookTroutTH,ages2use=-c(0,1))
      +summary(cc2)
       #>    Estimate Std. Error  t value   Pr(>|t|)
       #> Z  0.659987   0.136741 4.826549 0.01695159
       #> A 48.314197         NA       NA         NA
      -plot(cc2)
      +plot(cc2)
       
      -
      -## demonstration of using weights
      -cc3 <- catchCurve(catch~age,data=BrookTroutTH,ages2use=2:6,weighted=TRUE)
      -summary(cc3)
      +
      +## demonstration of using weights
      +cc3 <- catchCurve(catch~age,data=BrookTroutTH,ages2use=2:6,weighted=TRUE)
      +summary(cc3)
       #>     Estimate Std. Error t value   Pr(>|t|)
       #> Z  0.6430183  0.1417433  4.5365 0.02004993
       #> A 47.4296703         NA      NA         NA
      -plot(cc3)
      +plot(cc3)
       
      -
      -## demonstration of returning the linear model results
      -summary(cc3,parm="lm")
      +
      +## demonstration of returning the linear model results
      +summary(cc3,parm="lm")
       #> 
       #> Call:
       #> stats::lm(formula = log.catch.e ~ age.e, weights = W, na.action = stats::na.exclude)
      @@ -269,57 +319,53 @@ 

      Examples

      #> Multiple R-squared: 0.8728, Adjusted R-squared: 0.8304 #> F-statistic: 20.58 on 1 and 3 DF, p-value: 0.02005 #> -cbind(Est=coef(cc3,parm="lm"),confint(cc3,parm="lm")) +cbind(Est=coef(cc3,parm="lm"),confint(cc3,parm="lm")) #> Est 95% LCI 95% UCI #> (Intercept) 6.0085938 4.266116 7.751072 #> age.e -0.6430183 -1.094109 -0.191928 - -## demonstration of ability to work with missing age classes -df <- data.frame(age=c( 2, 3, 4, 5, 7, 9,12), - ct= c(100,92,83,71,56,35, 1)) -cc4 <- catchCurve(ct~age,data=df,ages2use=4:12) + +## demonstration of ability to work with missing age classes +df <- data.frame(age=c( 2, 3, 4, 5, 7, 9,12), + ct= c(100,92,83,71,56,35, 1)) +cc4 <- catchCurve(ct~age,data=df,ages2use=4:12) #> Warning: Some 'ages2use' not in observed ages. -summary(cc4) +summary(cc4) #> Estimate Std. Error t value Pr(>|t|) #> Z 0.5139824 0.1495532 3.436786 0.04133277 #> A 40.1891060 NA NA NA -plot(cc4) +plot(cc4) - -## demonstration of ability to work with missing age classes -## evein if catches are recorded as NAs -df <- data.frame(age=c( 2, 3, 4, 5, 6, 7, 8, 9,10,11,12), - ct= c(100,92,83,71,NA,56,NA,35,NA,NA, 1)) -cc5 <- catchCurve(ct~age,data=df,ages2use=4:12) -summary(cc5) + +## demonstration of ability to work with missing age classes +## evein if catches are recorded as NAs +df <- data.frame(age=c( 2, 3, 4, 5, 6, 7, 8, 9,10,11,12), + ct= c(100,92,83,71,NA,56,NA,35,NA,NA, 1)) +cc5 <- catchCurve(ct~age,data=df,ages2use=4:12) +summary(cc5) #> Estimate Std. Error t value Pr(>|t|) #> Z 0.5139824 0.1495532 3.436786 0.04133277 #> A 40.1891060 NA NA NA -plot(cc5) +plot(cc5) - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/chapmanRobson.html b/docs/reference/chapmanRobson.html index f97e8f0f..6a409c07 100644 --- a/docs/reference/chapmanRobson.html +++ b/docs/reference/chapmanRobson.html @@ -1,158 +1,200 @@ -Computes Chapman-Robson estimates of S and Z. — chapmanRobson • FSAComputes Chapman-Robson estimates of S and Z. — chapmanRobson • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the Chapman-Robson estimates of annual survival rate (S) and instantaneous mortality rate (Z) from catch-at-age data on the descending limb of a catch-curve. Method functions extract estimates with associated standard errors and confidence intervals. A plot method highlights the descending-limb, shows the linear model on the descending limb, and, optionally, prints the estimated Z and A.

    -
    -
    chapmanRobson(x, ...)
    -
    -# S3 method for default
    -chapmanRobson(
    -  x,
    -  catch,
    -  ages2use = age,
    -  zmethod = c("Smithetal", "Hoenigetal", "original"),
    -  ...
    -)
    -
    -# S3 method for formula
    -chapmanRobson(
    -  x,
    -  data,
    -  ages2use = age,
    -  zmethod = c("Smithetal", "Hoenigetal", "original"),
    -  ...
    -)
    -
    -# S3 method for chapmanRobson
    -summary(object, parm = c("all", "both", "Z", "S"), verbose = FALSE, ...)
    -
    -# S3 method for chapmanRobson
    -coef(object, parm = c("all", "both", "Z", "S"), ...)
    -
    -# S3 method for chapmanRobson
    -confint(
    -  object,
    -  parm = c("all", "both", "S", "Z"),
    -  level = conf.level,
    -  conf.level = 0.95,
    -  ...
    -)
    -
    -# S3 method for chapmanRobson
    -plot(
    -  x,
    -  pos.est = "topright",
    -  cex.est = 0.95,
    -  round.est = c(3, 1),
    -  ylab = "Catch",
    -  xlab = "Age",
    -  ylim = NULL,
    -  col.pt = "gray30",
    -  axis.age = c("both", "age", "recoded age"),
    -  ...
    -)
    +
    +

    Usage

    +
    chapmanRobson(x, ...)
    +
    +# S3 method for default
    +chapmanRobson(
    +  x,
    +  catch,
    +  ages2use = age,
    +  zmethod = c("Smithetal", "Hoenigetal", "original"),
    +  ...
    +)
    +
    +# S3 method for formula
    +chapmanRobson(
    +  x,
    +  data,
    +  ages2use = age,
    +  zmethod = c("Smithetal", "Hoenigetal", "original"),
    +  ...
    +)
    +
    +# S3 method for chapmanRobson
    +summary(object, parm = c("all", "both", "Z", "S"), verbose = FALSE, ...)
    +
    +# S3 method for chapmanRobson
    +coef(object, parm = c("all", "both", "Z", "S"), ...)
    +
    +# S3 method for chapmanRobson
    +confint(
    +  object,
    +  parm = c("all", "both", "S", "Z"),
    +  level = conf.level,
    +  conf.level = 0.95,
    +  ...
    +)
    +
    +# S3 method for chapmanRobson
    +plot(
    +  x,
    +  pos.est = "topright",
    +  cex.est = 0.95,
    +  round.est = c(3, 1),
    +  ylab = "Catch",
    +  xlab = "Age",
    +  ylim = NULL,
    +  col.pt = "gray30",
    +  axis.age = c("both", "age", "recoded age"),
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numerical vector of the assigned ages in the catch curve or a formula of the form catch~age when used in chapmanRobson. An object saved from chapmanRobson (i.e., of class chapmanRobson) when used in the methods.

    + +
    ...

    Additional arguments for methods.

    + +
    catch

    A numerical vector of the catches or CPUEs for the ages in the catch curve. Not used if x is a formula.

    + +
    ages2use

    A numerical vector of the ages that define the descending limb of the catch curve.

    + +
    zmethod

    A string that indicates the method to use for estimating Z. See details.

    + +
    data

    A data frame from which the variables in the x formula can be found. Not used if x is not a formula.

    + +
    object

    An object saved from the chapmanRobson call (i.e., of class chapmanRobson).

    + +
    parm

    A numeric or string (of parameter names) vector that specifies which parameters are to be given confidence intervals If missing, all parameters are considered.

    + +
    verbose

    A logical that indicates whether the method should return just the estimate (FALSE; default) or a more verbose statement.

    + +
    level

    Same as conf.level. Used for compatibility with the generic confint function.

    + +
    conf.level

    A number representing the level of confidence to use for constructing confidence intervals.

    + +
    pos.est

    A string to identify where to place the estimated mortality rates on the plot. Can be set to one of "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" or "center" for positioning the estimated mortality rates on the plot. Typically "bottomleft" (DEFAULT) and "topright" will be “out-of-the-way” placements. Set pos.est to NULL to remove the estimated mortality rates from the plot.

    + +
    cex.est

    A single numeric character expansion value for the estimated mortality rates on the plot.

    + +
    round.est

    A numeric that indicates the number of decimal place to which Z (first value) and S (second value) should be rounded. If only one value then it will be used for both Z and S.

    + +
    ylab

    A label for the y-axis ("Catch" is the default).

    + +
    xlab

    A label for the x-axis ("Age" is the default).

    + +
    ylim

    A numeric for the limits of the y-axis. If NULL then will default to 0 or the lowest catch and a maximum of the maximum catch. If a single value then it will be the maximum of the y-axis. If two values then these will the minimum and maximum values of the y-axis.

    + +
    col.pt

    A string that indicates the color of the plotted points.

    + +
    axis.age

    A string that indicates the type of x-axis to display. The age will display only the original ages, recoded age will display only the recoded ages, and both (DEFAULT) displays the original ages on the main axis and the recoded ages on the secondary axis.

    +
    -
    -

    Value

    -

    A list with the following items:

    • age the original vector of assigned ages.

    • +
      +

      Value

      + + +

      A list with the following items:

      +

      +
      • age the original vector of assigned ages.

      • catch the original vector of observed catches or CPUEs.

      • age.e a vector of assigned ages used to estimate mortalities.

      • catch.e a vector of catches or CPUEs used to estimate mortalities.

      • @@ -161,114 +203,110 @@

        Value

      • T a numeric holding the intermediate calculation of T. See references.

      • est A 2x2 matrix that contains the estimates and standard errors for S and Z.

      -
      -

      Details

      +
      +

      Details

      The default is to use all ages in the age vector. This is only appropriate if the age and catch vectors contain only the ages and catches on the descending limb of the catch curve. Use ages2use to isolate only the catch and ages on the descending limb.

      The Chapman-Robson method provides an estimate of the annual survival rate, with the annual mortality rate (A) determined by 1-S. The instantaneous mortality rate is often computed as -log(S). However, Hoenig et al. (1983) showed that this produced a biased (over)estimate of Z and provided a correction. The correction is applied by setting zmethod="Hoenigetal". Smith et al. (2012) showed that the Hoenig et al. method should be corrected for a variance inflation factor. This correction is applied by setting zmethod="Smithetal" (which is the default behavior). Choose zmethod="original" to use the original estimates for Z and it's SE as provided by Chapman and Robson.

      -
      -

      Testing

      +
      +

      Testing

      Tested the results of chapmanRobson against the results in Miranda and Bettoli (2007). The point estimates of S matched perfectly but the SE of S did not because Miranda and Bettoli used a rounded estimate of S in the calculation of the SE of S but chapmanRobson does not.

      Tested the results against the results from agesurv in fishmethods using the rockbass data.frame in fishmethods. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith et al. (2012)) results. FSA uses equation 2 from Smith et al. (2012) whereas fishmethods appears to use equation 5 from the same source to estimate the SE of Z.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      11-Mortality.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Chapman, D.G. and D.S. Robson. 1960. The analysis of a catch curve. Biometrics. 16:354-368.

      Hoenig, J.M. and W.D. Lawing, and N.A. Hoenig. 1983. Using mean age, mean length and median length data to estimate the total mortality rate. International Council for the Exploration of the Sea, CM 1983/D:23, Copenhagen.

      Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]

      Robson, D.S. and D.G. Chapman. 1961. Catch curves and mortality rates. Transactions of the American Fisheries Society. 90:181-189.

      Smith, M.W., A.Y. Then, C. Wor, G. Ralph, K.H. Pollock, and J.M. Hoenig. 2012. Recommendations for catch-curve analysis. North American Journal of Fisheries Management. 32:956-967.

      -
      -

      See also

      +
      +

      See also

      See agesurv in fishmethods for similar functionality. See catchCurve and agesurvcl in fishmethods for alternative methods. See metaM for empirical methods to estimate natural mortality.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      plot(catch~age,data=BrookTroutTH,pch=19)
      +    
      +

      Examples

      +
      plot(catch~age,data=BrookTroutTH,pch=19)
       
      -
      -## demonstration of formula notation
      -cr1 <- chapmanRobson(catch~age,data=BrookTroutTH,ages2use=2:6)
      -summary(cr1)
      +
      +## demonstration of formula notation
      +cr1 <- chapmanRobson(catch~age,data=BrookTroutTH,ages2use=2:6)
      +summary(cr1)
       #>     Estimate Std. Error
       #> S 49.4600432  2.3260749
       #> Z  0.7018264  0.1153428
      -summary(cr1,verbose=TRUE)
      +summary(cr1,verbose=TRUE)
       #> Intermediate statistics: n=235; T=229
       #>     Estimate Std. Error
       #> S 49.4600432  2.3260749
       #> Z  0.7018264  0.1153428
      -cbind(Est=coef(cr1),confint(cr1))
      +cbind(Est=coef(cr1),confint(cr1))
       #>          Est    95% LCI    95% UCI
       #> S 49.4600432 44.9010202 54.0190662
       #> Z  0.7018264  0.4757586  0.9278941
      -plot(cr1)
      +plot(cr1)
       
      -plot(cr1,axis.age="age")
      +plot(cr1,axis.age="age")
       
      -plot(cr1,axis.age="recoded age")
      +plot(cr1,axis.age="recoded age")
       
      -summary(cr1,parm="Z")
      +summary(cr1,parm="Z")
       #>    Estimate Std. Error
       #> Z 0.7018264  0.1153428
      -cbind(Est=coef(cr1,parm="Z"),confint(cr1,parm="Z"))
      +cbind(Est=coef(cr1,parm="Z"),confint(cr1,parm="Z"))
       #>         Est   95% LCI   95% UCI
       #> Z 0.7018264 0.4757586 0.9278941
      -
      -## demonstration of excluding ages2use
      -cr2 <- chapmanRobson(catch~age,data=BrookTroutTH,ages2use=-c(0,1))
      -summary(cr2)
      +
      +## demonstration of excluding ages2use
      +cr2 <- chapmanRobson(catch~age,data=BrookTroutTH,ages2use=-c(0,1))
      +summary(cr2)
       #>     Estimate Std. Error
       #> S 49.4600432  2.3260749
       #> Z  0.7018264  0.1153428
      -plot(cr2)
      +plot(cr2)
       
      -
      -## demonstration of ability to work with missing age classes
      -age <- c(  2, 3, 4, 5, 7, 9,12)
      -ct  <- c(100,92,83,71,56,35, 1)
      -cr3 <- chapmanRobson(age,ct,4:12)
      +
      +## demonstration of ability to work with missing age classes
      +age <- c(  2, 3, 4, 5, 7, 9,12)
      +ct  <- c(100,92,83,71,56,35, 1)
      +cr3 <- chapmanRobson(age,ct,4:12)
       #> Warning: Some 'ages2use' not in observed ages.
      -summary(cr3)
      +summary(cr3)
       #>     Estimate Std. Error
       #> S 63.2683658  1.8679976
       #> Z  0.4569234  0.1465991
      -plot(cr3)
      +plot(cr3)
       
      -
      +
       
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/col2rgbt.html b/docs/reference/col2rgbt.html index 9036b369..6952f006 100644 --- a/docs/reference/col2rgbt.html +++ b/docs/reference/col2rgbt.html @@ -1,124 +1,126 @@ -Converts an R color to RGB (red/green/blue) including a transparency (alpha channel). — col2rgbt • FSAConverts an R color to RGB (red/green/blue) including a transparency (alpha channel). — col2rgbt • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Converts an R color to RGB (red/green/blue) including a transparency (alpha channel). Similar to col2rgb except that a transparency (alpha channel) can be included.

    -
    -
    col2rgbt(col, transp = 1)
    +
    +

    Usage

    +
    col2rgbt(col, transp = 1)
    -
    -

    Arguments

    +
    +

    Arguments

    col

    A vector of any of the three kinds of R color specifications (i.e., either a color name (as listed by colors()), a hexadecimal string of the form "#rrggbb" or "#rrggbbaa" (see rgb), or a positive integer i meaning palette()[i].

    + +
    transp

    A numeric vector that indicates the transparency level for the color. The transparency values must be greater than 0. Transparency values greater than 1 are interpreted as the number of points plotted on top of each other before the transparency is lost and is, thus, transformed to the inverse of the transparency value provided.

    +
    -
    -

    Value

    -

    A vector of hexadecimal strings of the form "#rrggbbaa" as would be returned by rgb.

    +
    +

    Value

    + + +

    A vector of hexadecimal strings of the form "#rrggbbaa" as would be returned by rgb.

    -
    -

    See also

    +
    +

    See also

    See col2rgb for similar functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    col2rgbt("black")
    +    
    +

    Examples

    +
    col2rgbt("black")
     #> [1] "#000000FF"
    -col2rgbt("black",1/4)
    +col2rgbt("black",1/4)
     #> [1] "#00000040"
    -clrs <- c("black","blue","red","green")
    -col2rgbt(clrs)
    +clrs <- c("black","blue","red","green")
    +col2rgbt(clrs)
     #> [1] "#000000FF" "#0000FFFF" "#FF0000FF" "#00FF00FF"
    -col2rgbt(clrs,1/4)
    +col2rgbt(clrs,1/4)
     #> [1] "#00000040" "#0000FF40" "#FF000040" "#00FF0040"
    -trans <- (1:4)/5
    -col2rgbt(clrs,trans)
    +trans <- (1:4)/5
    +col2rgbt(clrs,trans)
     #> [1] "#00000033" "#0000FF66" "#FF000099" "#00FF00CC"
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/depletion.html b/docs/reference/depletion.html index 91584341..81239598 100644 --- a/docs/reference/depletion.html +++ b/docs/reference/depletion.html @@ -1,158 +1,206 @@ -Computes the Leslie or DeLury population estimate from catch and effort data. — depletion • FSAComputes the Leslie or DeLury population estimate from catch and effort data. — depletion • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the Leslie or DeLury estimates of population size and catchability coefficient from paired catch and effort data. The Ricker modification may also be used.

    -
    -
    depletion(
    -  catch,
    -  effort,
    -  method = c("Leslie", "DeLury", "Delury"),
    -  Ricker.mod = FALSE
    -)
    -
    -# S3 method for depletion
    -summary(object, parm = c("all", "both", "No", "q", "lm"), verbose = FALSE, ...)
    -
    -# S3 method for depletion
    -coef(object, parm = c("all", "both", "No", "q", "lm"), ...)
    -
    -# S3 method for depletion
    -confint(
    -  object,
    -  parm = c("all", "both", "No", "q", "lm"),
    -  level = conf.level,
    -  conf.level = 0.95,
    -  ...
    -)
    -
    -# S3 method for depletion
    -anova(object, ...)
    -
    -# S3 method for depletion
    -rSquared(object, digits = getOption("digits"), percent = FALSE, ...)
    -
    -# S3 method for depletion
    -plot(
    -  x,
    -  xlab = NULL,
    -  ylab = NULL,
    -  pch = 19,
    -  col.pt = "black",
    -  col.mdl = "gray70",
    -  lwd = 1,
    -  lty = 1,
    -  pos.est = "topright",
    -  cex.est = 0.95,
    -  ...
    -)
    +
    +

    Usage

    +
    depletion(
    +  catch,
    +  effort,
    +  method = c("Leslie", "DeLury", "Delury"),
    +  Ricker.mod = FALSE
    +)
    +
    +# S3 method for depletion
    +summary(object, parm = c("all", "both", "No", "q", "lm"), verbose = FALSE, ...)
    +
    +# S3 method for depletion
    +coef(object, parm = c("all", "both", "No", "q", "lm"), ...)
    +
    +# S3 method for depletion
    +confint(
    +  object,
    +  parm = c("all", "both", "No", "q", "lm"),
    +  level = conf.level,
    +  conf.level = 0.95,
    +  ...
    +)
    +
    +# S3 method for depletion
    +anova(object, ...)
    +
    +# S3 method for depletion
    +rSquared(object, digits = getOption("digits"), percent = FALSE, ...)
    +
    +# S3 method for depletion
    +plot(
    +  x,
    +  xlab = NULL,
    +  ylab = NULL,
    +  pch = 19,
    +  col.pt = "black",
    +  col.mdl = "gray70",
    +  lwd = 1,
    +  lty = 1,
    +  pos.est = "topright",
    +  cex.est = 0.95,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    catch

    A numeric vector of catches of fish at each time.

    + +
    effort

    A numeric vector of efforts expended at each time.

    + +
    method

    A single string that indicates which depletion method to use

    + +
    Ricker.mod

    A single logical that indicates whether to use the modification proposed by Ricker (=TRUE) or not (=FALSE, default).

    + +
    object

    An object saved from the removal call (i.e., of class depletion).

    + +
    parm

    A specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered.

    + +
    verbose

    A logical that indicates whether a reminder of the method used should be printed with the summary results.

    + +
    ...

    Additional arguments for methods.

    + +
    level

    Same as conf.level but used for compatibility with generic confint function.

    + +
    conf.level

    A single number that represents the level of confidence to use for constructing confidence intervals.

    + +
    digits

    The number of digits to round the rSquared result to.

    + +
    percent

    A logical that indicates if the rSquared result should be returned as a percentage (=TRUE) or as a proportion (=FALSE; default).

    + +
    x

    An object saved from the depletion call (i.e., of class depletion).

    + +
    xlab

    A label for the x-axis.

    + +
    ylab

    A label for the y-axis.

    + +
    pch

    A numeric that indicates the type of plotting character.

    + +
    col.pt

    A string that indicates the color of the plotted points.

    + +
    col.mdl

    A string that indicates the color of the fitted line.

    + +
    lwd

    A numeric that indicates the line width of the fitted line.

    + +
    lty

    A numeric that indicates the type of line used for the fitted line.

    + +
    pos.est

    A single string to identify where to place the estimated population estimate and catchability on the plot. Can be set to one of "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" or "center" for positioning the estimated mortality rates on the plot. Typically "bottomleft" (DEFAULT) and "topright" will be “out-of-the-way” placements. Set pos.est to NULL to remove the estimated population size and catchability coefficient from the plot.

    + +
    cex.est

    A single numeric that identifies the character expansion value for the estimated population estimate and catchability placed on the plot.

    +
    -
    -

    Value

    -

    A list with the following items:

    • method A string that indicates whether the "Leslie" or "DeLury" model was used.

    • +
      +

      Value

      + + +

      A list with the following items:

      +

      +
      • method A string that indicates whether the "Leslie" or "DeLury" model was used.

      • catch The original vector of catches.

      • effort The original vector of efforts.

      • cpe A computed vector of catch-per-unit-effort for each time.

      • @@ -160,69 +208,69 @@

        Value

      • lm The lm object from the fit of CPE on K (Leslie method) or log(CPE) on E (DeLury method).

      • est A 2x2 matrix that contains the estimates and standard errors for No and q.

      -
      -

      Details

      +
      +

      Details

      For the Leslie method, a linear regression model of catch-per-unit-effort on cumulative catch prior to the sample is fit. The catchability coefficient (q) is estimated from the negative of the slope and the initial population size (No) is estimated by dividing the intercept by the catchability coefficient. If Ricker.mod=TRUE then the cumulative catch is modified to be the cumulative catch prior to the sample plus half of the catch of the current sample.

      For the DeLury method, a linear regression model of log (catch-per-unit-effort) on cumulative effort is fit. The catchability coefficient (q) is estimated from the negative of the slope and the initial population size (No) is estimated by dividing the intercept as an exponent of e by the catchability coefficient. If Ricker.mod=TRUE then the cumulative effort is modified to be the cumulative effort prior to the sample plus half of the effort of the current sample.

      Standard errors for the catchability and population size estimates are computed from formulas on page 298 (for Leslie) and 303 (for DeLury) from Seber (2002). Confidence intervals are computed using standard large-sample normal distribution theory with the regression error df.

      -
      -

      testing

      -

      The Leslie method without the Ricker modification and the DeLury method with the Ricker modification matches the results from deplet in fishmethods for the darter (from fishmethods), LobsterPEI and BlueCrab from FSAdata, and SMBassLS for N0 to whole numbers, the SE for No to one decimal, q to seven decimals, and the SE of q to at least five decimals.

      +
      +

      testing

      +

      The Leslie method without the Ricker modification and the DeLury method with the Ricker modification matches the results from deplet in fishmethods for the darter (from fishmethods), LobsterPEI and BlueCrab from FSAdata, and SMBassLS for N0 to whole numbers, the SE for No to one decimal, q to seven decimals, and the SE of q to at least five decimals.

      The Leslie method matches the results of Seber (2002) for N0, q, and the CI for Q but not the CI for N (which was so far off that it might be that Seber's result is incorrect) for the lobster data and the q and CI for q but the NO or its CI (likely due to lots of rounding in Seber 2002) for the Blue Crab data.

      The Leslie and DeLury methods match the results of Ricker (1975) for No and Q but not for the CI of No (Ricker used a very different method to compute CIs).

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      10-Abundance from Depletion Data.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]

      Seber, G.A.F. 2002. The Estimation of Animal Abundance. Edward Arnold, Second edition (reprinted).

      -
      -

      See also

      +
      +

      See also

      See removal for related functionality and deplet in fishmethods for similar functionality.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## Leslie model examples
      -# no Ricker modification
      -l1 <- depletion(SMBassLS$catch,SMBassLS$effort,method="Leslie")
      -summary(l1)
      +    
      +

      Examples

      +
      ## Leslie model examples
      +# no Ricker modification
      +l1 <- depletion(SMBassLS$catch,SMBassLS$effort,method="Leslie")
      +summary(l1)
       #>        Estimate    Std. Err.
       #> No 1.060296e+03 1.692676e+02
       #> q  1.484403e-02 3.520491e-03
      -summary(l1,verbose=TRUE)
      +summary(l1,verbose=TRUE)
       #> The Leslie method was used.
       #>        Estimate    Std. Err.
       #> No 1.060296e+03 1.692676e+02
       #> q  1.484403e-02 3.520491e-03
      -summary(l1,parm="No")
      +summary(l1,parm="No")
       #>    Estimate Std. Err.
       #> No 1060.296  169.2676
      -rSquared(l1)
      +rSquared(l1)
       #> [1] 0.6896648
      -rSquared(l1,digits=1,percent=TRUE)
      +rSquared(l1,digits=1,percent=TRUE)
       #> [1] 69
      -cbind(Est=coef(l1),confint(l1))
      +cbind(Est=coef(l1),confint(l1))
       #>             Est      95% LCI      95% UCI
       #> No 1.060296e+03 6.699638e+02 1.450627e+03
       #> q  1.484403e-02 6.725759e-03 2.296229e-02
      -cbind(Est=coef(l1,parm="No"),confint(l1,parm="No"))
      +cbind(Est=coef(l1,parm="No"),confint(l1,parm="No"))
       #>         Est  95% LCI  95% UCI
       #> No 1060.296 669.9638 1450.627
      -cbind(Est=coef(l1,parm="q"),confint(l1,parm="q"))
      +cbind(Est=coef(l1,parm="q"),confint(l1,parm="q"))
       #>          Est     95% LCI    95% UCI
       #> q 0.01484403 0.006725759 0.02296229
      -summary(l1,parm="lm")
      +summary(l1,parm="lm")
       #> 
       #> Call:
       #> stats::lm(formula = cpe ~ K)
      @@ -242,44 +290,44 @@ 

      Examples

      #> Multiple R-squared: 0.6897, Adjusted R-squared: 0.6509 #> F-statistic: 17.78 on 1 and 8 DF, p-value: 0.00293 #> -plot(l1) +plot(l1) - -# with Ricker modification -l2 <- depletion(SMBassLS$catch,SMBassLS$effort,method="Leslie",Ricker.mod=TRUE) -summary(l2) + +# with Ricker modification +l2 <- depletion(SMBassLS$catch,SMBassLS$effort,method="Leslie",Ricker.mod=TRUE) +summary(l2) #> Estimate Std. Err. #> No 1.077571e+03 1.778035e+02 #> q 1.525078e-02 3.911632e-03 -cbind(Est=coef(l2),confint(l1)) +cbind(Est=coef(l2),confint(l1)) #> Est 95% LCI 95% UCI #> No 1.077571e+03 6.699638e+02 1.450627e+03 #> q 1.525078e-02 6.725759e-03 2.296229e-02 -plot(l2) +plot(l2) - -## DeLury model examples -# no Ricker modification -d1 <- depletion(SMBassLS$catch,SMBassLS$effort,method="DeLury") -summary(d1) + +## DeLury model examples +# no Ricker modification +d1 <- depletion(SMBassLS$catch,SMBassLS$effort,method="DeLury") +summary(d1) #> Estimate Std. Err. #> No 1.098503e+03 1.916049e+02 #> q 1.319375e-02 3.585777e-03 -summary(d1,parm="q") +summary(d1,parm="q") #> Estimate Std. Err. #> q 0.01319375 0.003585777 -summary(d1,verbose=TRUE) +summary(d1,verbose=TRUE) #> The DeLury method was used. #> Estimate Std. Err. #> No 1.098503e+03 1.916049e+02 #> q 1.319375e-02 3.585777e-03 -rSquared(d1) +rSquared(d1) #> [1] 0.6285719 -cbind(Est=coef(d1),confint(d1)) +cbind(Est=coef(d1),confint(d1)) #> Est 95% LCI 95% UCI #> No 1.098503e+03 6.566616e+02 1.540345e+03 #> q 1.319375e-02 4.924937e-03 2.146257e-02 -summary(d1,parm="lm") +summary(d1,parm="lm") #> #> Call: #> stats::lm(formula = log(cpe) ~ E) @@ -299,47 +347,43 @@

      Examples

      #> Multiple R-squared: 0.6286, Adjusted R-squared: 0.5821 #> F-statistic: 13.54 on 1 and 8 DF, p-value: 0.006224 #> -plot(d1) +plot(d1) - -# with Ricker modification -d2 <- depletion(SMBassLS$catch,SMBassLS$effort,method="DeLury",Ricker.mod=TRUE) -summary(d2) + +# with Ricker modification +d2 <- depletion(SMBassLS$catch,SMBassLS$effort,method="DeLury",Ricker.mod=TRUE) +summary(d2) #> Estimate Std. Err. #> No 1.150420e+03 1.876083e+02 #> q 1.319375e-02 3.585777e-03 -cbind(Est=coef(d2),confint(d2)) +cbind(Est=coef(d2),confint(d2)) #> Est 95% LCI 95% UCI #> No 1.150420e+03 7.177940e+02 1.583045e+03 #> q 1.319375e-02 4.924937e-03 2.146257e-02 -cbind(Est=coef(d2,parm="q"),confint(d2,parm="q")) +cbind(Est=coef(d2,parm="q"),confint(d2,parm="q")) #> Est 95% LCI 95% UCI #> q 0.01319375 0.004924937 0.02146257 -plot(d2) +plot(d2) - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/dunnTest.html b/docs/reference/dunnTest.html index 1e879d7c..7e274d79 100644 --- a/docs/reference/dunnTest.html +++ b/docs/reference/dunnTest.html @@ -1,123 +1,141 @@ -Dunn's Kruskal-Wallis Multiple Comparisons. — dunnTest • FSADunn's Kruskal-Wallis Multiple Comparisons. — dunnTest • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Performs Dunn's (1964) test of multiple comparisons following a significant Kruskal-Wallis test, possibly with a correction to control the experimentwise error rate. This is largely a wrapper for the dunn.test function in dunn.test. Please see and cite that package.

    -
    -
    dunnTest(x, ...)
    -
    -# S3 method for default
    -dunnTest(
    -  x,
    -  g,
    -  method = dunn.test::p.adjustment.methods[c(4, 2:3, 5:8, 1)],
    -  two.sided = TRUE,
    -  altp = two.sided,
    -  ...
    -)
    -
    -# S3 method for formula
    -dunnTest(
    -  x,
    -  data = NULL,
    -  method = dunn.test::p.adjustment.methods[c(4, 2:3, 5:8, 1)],
    -  two.sided = TRUE,
    -  altp = two.sided,
    -  ...
    -)
    -
    -# S3 method for dunnTest
    -print(x, dunn.test.results = FALSE, ...)
    +
    +

    Usage

    +
    dunnTest(x, ...)
    +
    +# S3 method for default
    +dunnTest(
    +  x,
    +  g,
    +  method = dunn.test::p.adjustment.methods[c(4, 2:3, 5:8, 1)],
    +  two.sided = TRUE,
    +  altp = two.sided,
    +  ...
    +)
    +
    +# S3 method for formula
    +dunnTest(
    +  x,
    +  data = NULL,
    +  method = dunn.test::p.adjustment.methods[c(4, 2:3, 5:8, 1)],
    +  two.sided = TRUE,
    +  altp = two.sided,
    +  ...
    +)
    +
    +# S3 method for dunnTest
    +print(x, dunn.test.results = FALSE, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector of data values or a formula of the form x~g.

    + +
    ...

    Not yet used.

    + +
    g

    A factor vector or a (non-numeric) vector that can be coerced to a factor vector.

    + +
    method

    A single string that identifies the method used to control the experimentwise error rate. See the list of methods in p.adjustment.methods (documented with dunn.test) in dunn.test.

    + +
    two.sided

    A single logical that indicates whether a two-sided p-value should be returned (TRUE; default) or not. See details.

    + +
    altp

    Same as two.sided. Allows similar code with the dunn.test function in dunn.test. two.sided is maintained because it pre-dates altp.

    + +
    data

    A data.frame that minimally contains x and g.

    + +
    dunn.test.results

    A single logical that indicates whether the results that would have been printed by dunn.test function in dunn.test are shown.

    +
    -
    -

    Value

    -

    A list with three items -- method is the long name of the method used to control the experimentwise error rate, dtres is the strings that would have been printed by the dunn.test function in dunn.test, and res is a data.frame with the following variables:

    • Comparison: Labels for each pairwise comparison.

    • +
      +

      Value

      + + +

      A list with three items -- method is the long name of the method used to control the experimentwise error rate, dtres is the strings that would have been printed by the dunn.test function in dunn.test, and res is a data.frame with the following variables:

      • Comparison: Labels for each pairwise comparison.

      • Z: Values for the Z test statistic for each comparison.

      • P.unadj: Unadjusted p-values for each comparison.

      • P.adj: Adjusted p-values for each comparison.

      -
      -

      Details

      +
      +

      Details

      This function performs “Dunn's” test of multiple comparisons following a Kruskal-Wallis test. Unadjusted one- or two-sided p-values for each pairwise comparison among groups are computed following Dunn's description as implemented in the dunn.test function from dunn.test. These p-values may be adjusted using methods in the p.adjustment.methods function in dunn.test.

      This function is largely a wrapper for the dunn.test function in dunn.test. Changes here are the possible use of formula notation, results not printed by the main function (but are printed in a more useful format (in my opinion) by the print function), the p-values are adjusted by default with the “holm” method, and two-sided p-values are returned by default. See dunn.test function in dunn.test for more details underlying these computations.

      -
      -

      Note

      +
      +

      Note

      The data.frame will be reduced to only those rows that are complete cases for x and g. In other words, rows with missing data for either x or g are removed from the analysis and a warning will be issued.

      There are a number of functions in other packages that do similar analyses.

      The results from DunnTest match the results (in a different format) from the dunn.test function from dunn.test.

      @@ -126,31 +144,31 @@

      Note

      The kruskalmc function from the pgirmess package uses the method described by Siegel and Castellan (1988).

      It is not clear which method kruskal from the agricolae package uses. It does not seem to output p-values but it does allow for a wide variety of methods to control the experimentwise error rate.

      -
      -

      References

      +
      +

      References

      Dunn, O.J. 1964. Multiple comparisons using rank sums. Technometrics 6:241-252.

      -
      -

      See also

      +
      +

      See also

      See kruskal.test, dunn.test in dunn.test, posthoc.kruskal.nemenyi.test in PMCMR, kruskalmc in pgirmess, and kruskal in agricolae.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com, but this is largely a wrapper (see details) for dunn.test in dunn.test written by Alexis Dinno.

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com, but this is largely a wrapper (see details) for dunn.test in dunn.test written by Alexis Dinno.

      -
      -

      Examples

      -
      ## pH in four ponds data from Zar (2010)
      -ponds <- data.frame(pond=as.factor(rep(1:4,each=8)),
      -                    pH=c(7.68,7.69,7.70,7.70,7.72,7.73,7.73,7.76,
      -                         7.71,7.73,7.74,7.74,7.78,7.78,7.80,7.81,
      -                         7.74,7.75,7.77,7.78,7.80,7.81,7.84,NA,
      -                         7.71,7.71,7.74,7.79,7.81,7.85,7.87,7.91))
      -ponds2 <- ponds[complete.cases(ponds),]
      -
      -# non-formula usage (default "holm" method)
      -dunnTest(ponds2$pH,ponds2$pond)
      +    
      +

      Examples

      +
      ## pH in four ponds data from Zar (2010)
      +ponds <- data.frame(pond=as.factor(rep(1:4,each=8)),
      +                    pH=c(7.68,7.69,7.70,7.70,7.72,7.73,7.73,7.76,
      +                         7.71,7.73,7.74,7.74,7.78,7.78,7.80,7.81,
      +                         7.74,7.75,7.77,7.78,7.80,7.81,7.84,NA,
      +                         7.71,7.71,7.74,7.79,7.81,7.85,7.87,7.91))
      +ponds2 <- ponds[complete.cases(ponds),]
      +
      +# non-formula usage (default "holm" method)
      +dunnTest(ponds2$pH,ponds2$pond)
       #> Dunn (1964) Kruskal-Wallis multiple comparison
       #>   p-values adjusted with the Holm method.
       #>   Comparison           Z     P.unadj      P.adj
      @@ -160,9 +178,9 @@ 

      Examples

      #> 4 1 - 4 -2.99180882 0.002773299 0.01663979 #> 5 2 - 4 -0.85480252 0.392660483 0.78532097 #> 6 3 - 4 0.05898698 0.952962480 0.95296248 - -# formula usage (default "holm" method) -dunnTest(pH~pond,data=ponds2) + +# formula usage (default "holm" method) +dunnTest(pH~pond,data=ponds2) #> Dunn (1964) Kruskal-Wallis multiple comparison #> p-values adjusted with the Holm method. #> Comparison Z P.unadj P.adj @@ -172,9 +190,9 @@

      Examples

      #> 4 1 - 4 -2.99180882 0.002773299 0.01663979 #> 5 2 - 4 -0.85480252 0.392660483 0.78532097 #> 6 3 - 4 0.05898698 0.952962480 0.95296248 - -# other methods -dunnTest(pH~pond,data=ponds2,method="bonferroni") + +# other methods +dunnTest(pH~pond,data=ponds2,method="bonferroni") #> Dunn (1964) Kruskal-Wallis multiple comparison #> p-values adjusted with the Bonferroni method. #> Comparison Z P.unadj P.adj @@ -184,7 +202,7 @@

      Examples

      #> 4 1 - 4 -2.99180882 0.002773299 0.01663979 #> 5 2 - 4 -0.85480252 0.392660483 1.00000000 #> 6 3 - 4 0.05898698 0.952962480 1.00000000 -dunnTest(pH~pond,data=ponds2,method="bh") +dunnTest(pH~pond,data=ponds2,method="bh") #> Dunn (1964) Kruskal-Wallis multiple comparison #> p-values adjusted with the Benjamini-Hochberg method. #> Comparison Z P.unadj P.adj @@ -194,7 +212,7 @@

      Examples

      #> 4 1 - 4 -2.99180882 0.002773299 0.016639793 #> 5 2 - 4 -0.85480252 0.392660483 0.471192580 #> 6 3 - 4 0.05898698 0.952962480 0.952962480 -dunnTest(pH~pond,data=ponds2,method="none") +dunnTest(pH~pond,data=ponds2,method="none") #> Dunn (1964) Kruskal-Wallis multiple comparison #> with no adjustment for p-values. #> Comparison Z P.unadj P.adj @@ -204,9 +222,9 @@

      Examples

      #> 4 1 - 4 -2.99180882 0.002773299 0.002773299 #> 5 2 - 4 -0.85480252 0.392660483 0.392660483 #> 6 3 - 4 0.05898698 0.952962480 0.952962480 - -# one-sided -dunnTest(pH~pond,data=ponds2,two.sided=FALSE) + +# one-sided +dunnTest(pH~pond,data=ponds2,two.sided=FALSE) #> Dunn (1964) Kruskal-Wallis multiple comparison #> p-values adjusted with the Holm method. #> Comparison Z P.unadj P.adj @@ -216,9 +234,9 @@

      Examples

      #> 4 1 - 4 -2.99180882 0.001386649 0.008319896 #> 5 2 - 4 -0.85480252 0.196330241 0.392660483 #> 6 3 - 4 0.05898698 0.476481240 0.476481240 - -# warning message if incomplete cases were removed -dunnTest(pH~pond,data=ponds) + +# warning message if incomplete cases were removed +dunnTest(pH~pond,data=ponds) #> Warning: Some rows deleted from 'x' and 'g' because missing data. #> Dunn (1964) Kruskal-Wallis multiple comparison #> p-values adjusted with the Holm method. @@ -229,10 +247,10 @@

      Examples

      #> 4 1 - 4 -2.99180882 0.002773299 0.01663979 #> 5 2 - 4 -0.85480252 0.392660483 0.78532097 #> 6 3 - 4 0.05898698 0.952962480 0.95296248 - -# print dunn.test results -tmp <- dunnTest(pH~pond,data=ponds2) -print(tmp,dunn.test.results=TRUE) + +# print dunn.test results +tmp <- dunnTest(pH~pond,data=ponds2) +print(tmp,dunn.test.results=TRUE) #> Kruskal-Wallis rank sum test #> #> data: x and g @@ -255,29 +273,25 @@

      Examples

      #> #> alpha = 0.05 #> Reject Ho if p <= alpha - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/expandCounts.html b/docs/reference/expandCounts.html index c8d29bd6..17a98c78 100644 --- a/docs/reference/expandCounts.html +++ b/docs/reference/expandCounts.html @@ -1,124 +1,144 @@ -Repeat individual fish data (including lengths) from tallied counts. — expandCounts • FSARepeat individual fish data (including lengths) from tallied counts. — expandCounts • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Repeat individual fish data, including lengths, from tallied counts and, optionally, add a random digit to length measurements to simulate actual length of fish in the bin. This is useful as a precursor to summaries that require information, e.g., lengths, of individual fish (e.g., length frequency histograms, means lengths).

    -
    -
    expandCounts(
    -  data,
    -  cform,
    -  lform = NULL,
    -  removeCount = TRUE,
    -  lprec = 0.1,
    -  new.name = "newlen",
    -  cwid = 0,
    -  verbose = TRUE,
    -  ...
    -)
    +
    +

    Usage

    +
    expandCounts(
    +  data,
    +  cform,
    +  lform = NULL,
    +  removeCount = TRUE,
    +  lprec = 0.1,
    +  new.name = "newlen",
    +  cwid = 0,
    +  verbose = TRUE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    data

    A data.frame that contains variables in cform and lform.

    + +
    cform

    A formula of the form ~countvar where countvar generically represents the variable in data that contains the counts of individuals. See details.

    + +
    lform

    An optional formula of the form ~lowerbin+upperbin where lowerbin and upperbin generically represent the variables in data that identify the lower- and upper-values of the length bins. See details.

    + +
    removeCount

    A single logical that indicates if the variable that contains the counts of individuals (as given in cform) should be removed form the returned data.frame. The default is TRUE such that the variable will be removed as the returned data.frame contains individuals and the counts of individuals in tallied bins is not relevant to an individual.

    + +
    lprec

    A single numeric that controls the precision to which the random lengths are recorded. See details.

    + +
    new.name

    A single string that contains a name for the new length variable if random lengths are to be created.

    + +
    cwid

    A single positive numeric that will be added to the lower length bin value in instances where the count exceeds one but only a lower (and not an upper) length were recorded. See details.

    + +
    verbose

    A logical indicating whether progress message should be printed or not.

    + +
    ...

    Not yet implemented.

    +
    -
    -

    Value

    -

    A data.frame of the same structure as data except that the variable in cform may be deleted and the variable in new.name may be added. The returned data.frame will have more rows than data because of the potential addition of new individuals expanded from the counts in cform.

    +
    +

    Value

    + + +

    A data.frame of the same structure as data except that the variable in cform may be deleted and the variable in new.name may be added. The returned data.frame will have more rows than data because of the potential addition of new individuals expanded from the counts in cform.

    -
    -

    Details

    +
    +

    Details

    Fisheries data may be recorded as tallied counts in the field. For example, field biologists may have simply recorded that there were 10 fish in one group, 15 in another, etc. More specifically, the biologist may have recorded that there were 10 male Bluegill from the first sampling event between 100 and 124 mm, 15 male Bluegill from the first sampling event between 125 and 149 mm, and so on. At times, it may be necessary to expand these counts such that the repeated information appears in individual rows in a new data.frame. In this specific example, the tallied counts would be repeated such that the male, Bluegill, first sampling event, 100-124 mm information would be repeated 10 times; the male, Bluegill, first sampling event, 125-149 mm information would be repeated 15 times, and so on. This function facilitates this type of expansion.

    Length data has often been collected in a “binned-and-tallied” format (e.g., 10 fish in the 100-124 mm group, 15 in the 125-149 mm group, etc.). This type of data collection does not facilitate easy or precise calculations of summary statistics of length (i.e., mean and standard deviations of length). Expanding the data as described above does not solve this problem because the length data are still essentially categorical (i.e., which group the fish belongs to rather than what it's actual length is). To facilitate computation of summary statistics, the data can be expanded as described above and then a length can be randomly selected from within the recorded length bin to serve as a “measured” length for that fish. This function performs this type of expansion by randomly selecting the length from a uniform distribution within the length bin (e.g., each value between 100 and 124 mm has the same probability of being selected).

    This function makes some assumptions for some coding situations. First, it assumes that all lowerbin values are actually lower than all upperbin values. The function will throw an error if this is not true. Second, it assumes that if a lowerbin but no upperbin value is given then the lowerbin value is the exact measurement for those fish. Third, it assumes that if an upperbin but no lowerbin value is given that this is a data entry error and that the upperbin value should be the lowerbin value. Fourth, it assumes that it is a data entry error if varcount is zero or NA and lowerbin or upperbin contains values (i.e., why would there be lengths if no fish were captured?).

    -
    -

    See also

    +
    +

    See also

    See expandLenFreq for expanding length frequencies where individual fish measurements were made on individual fish in a subsample and the remaining fish were simply counted.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    # all need expansion
    -( d1 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"),
    -                   lwr.bin=c(15,15.5,16,16,17,17),
    -                   upr.bin=c(15.5,16,16.5,16.5,17.5,17.5),
    -                   freq=c(6,4,2,3,1,1)) )
    +    
    +

    Examples

    +
    # all need expansion
    +( d1 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"),
    +                   lwr.bin=c(15,15.5,16,16,17,17),
    +                   upr.bin=c(15.5,16,16.5,16.5,17.5,17.5),
    +                   freq=c(6,4,2,3,1,1)) )
     #>      name lwr.bin upr.bin freq
     #> 1 Johnson    15.0    15.5    6
     #> 2 Johnson    15.5    16.0    4
    @@ -126,7 +146,7 @@ 

    Examples

    #> 4 Frank 16.0 16.5 3 #> 5 Frank 17.0 17.5 1 #> 6 Max 17.0 17.5 1 -expandCounts(d1,~freq) +expandCounts(d1,~freq) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -148,34 +168,34 @@

    Examples

    #> 15 Frank 16.0 16.5 #> 16 Frank 16.0 16.5 #> 17 Frank 16.0 16.5 -expandCounts(d1,~freq,~lwr.bin+upr.bin) +expandCounts(d1,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. #> name lwr.bin upr.bin newlen lennote -#> 1 Frank 17.0 17.5 17.5 Expanded length -#> 2 Max 17.0 17.5 17.0 Expanded length -#> 3 Johnson 15.0 15.5 15.2 Expanded length -#> 4 Johnson 15.0 15.5 15.2 Expanded length -#> 5 Johnson 15.0 15.5 15.5 Expanded length -#> 6 Johnson 15.0 15.5 15.0 Expanded length -#> 7 Johnson 15.0 15.5 15.5 Expanded length -#> 8 Johnson 15.0 15.5 15.1 Expanded length -#> 9 Johnson 15.5 16.0 15.5 Expanded length +#> 1 Frank 17.0 17.5 17.1 Expanded length +#> 2 Max 17.0 17.5 17.2 Expanded length +#> 3 Johnson 15.0 15.5 15.3 Expanded length +#> 4 Johnson 15.0 15.5 15.5 Expanded length +#> 5 Johnson 15.0 15.5 15.2 Expanded length +#> 6 Johnson 15.0 15.5 15.4 Expanded length +#> 7 Johnson 15.0 15.5 15.3 Expanded length +#> 8 Johnson 15.0 15.5 15.0 Expanded length +#> 9 Johnson 15.5 16.0 15.7 Expanded length #> 10 Johnson 15.5 16.0 15.9 Expanded length -#> 11 Johnson 15.5 16.0 15.8 Expanded length +#> 11 Johnson 15.5 16.0 15.6 Expanded length #> 12 Johnson 15.5 16.0 15.5 Expanded length -#> 13 Jones 16.0 16.5 16.1 Expanded length +#> 13 Jones 16.0 16.5 16.0 Expanded length #> 14 Jones 16.0 16.5 16.5 Expanded length -#> 15 Frank 16.0 16.5 16.1 Expanded length +#> 15 Frank 16.0 16.5 16.3 Expanded length #> 16 Frank 16.0 16.5 16.4 Expanded length #> 17 Frank 16.0 16.5 16.4 Expanded length - -# some need expansion -( d2 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), - lwr.bin=c(15,15.5,16,16,17.1,17.3), - upr.bin=c(15.5,16,16.5,16.5,17.1,17.3), - freq=c(6,4,2,3,1,1)) ) + +# some need expansion +( d2 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), + lwr.bin=c(15,15.5,16,16,17.1,17.3), 
+ upr.bin=c(15.5,16,16.5,16.5,17.1,17.3), + freq=c(6,4,2,3,1,1)) ) #> name lwr.bin upr.bin freq #> 1 Johnson 15.0 15.5 6 #> 2 Johnson 15.5 16.0 4 @@ -183,7 +203,7 @@

    Examples

    #> 4 Frank 16.0 16.5 3 #> 5 Frank 17.1 17.1 1 #> 6 Max 17.3 17.3 1 -expandCounts(d2,~freq) +expandCounts(d2,~freq) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -205,34 +225,34 @@

    Examples

    #> 15 Frank 16.0 16.5 #> 16 Frank 16.0 16.5 #> 17 Frank 16.0 16.5 -expandCounts(d2,~freq,~lwr.bin+upr.bin) +expandCounts(d2,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. #> name lwr.bin upr.bin newlen lennote #> 1 Frank 17.1 17.1 17.1 Observed length #> 2 Max 17.3 17.3 17.3 Observed length -#> 3 Johnson 15.0 15.5 15.1 Expanded length +#> 3 Johnson 15.0 15.5 15.2 Expanded length #> 4 Johnson 15.0 15.5 15.5 Expanded length -#> 5 Johnson 15.0 15.5 15.5 Expanded length -#> 6 Johnson 15.0 15.5 15.2 Expanded length -#> 7 Johnson 15.0 15.5 15.3 Expanded length -#> 8 Johnson 15.0 15.5 15.3 Expanded length -#> 9 Johnson 15.5 16.0 15.5 Expanded length -#> 10 Johnson 15.5 16.0 15.9 Expanded length -#> 11 Johnson 15.5 16.0 15.6 Expanded length -#> 12 Johnson 15.5 16.0 15.8 Expanded length -#> 13 Jones 16.0 16.5 16.3 Expanded length -#> 14 Jones 16.0 16.5 16.4 Expanded length +#> 5 Johnson 15.0 15.5 15.1 Expanded length +#> 6 Johnson 15.0 15.5 15.4 Expanded length +#> 7 Johnson 15.0 15.5 15.0 Expanded length +#> 8 Johnson 15.0 15.5 15.1 Expanded length +#> 9 Johnson 15.5 16.0 15.8 Expanded length +#> 10 Johnson 15.5 16.0 15.5 Expanded length +#> 11 Johnson 15.5 16.0 15.7 Expanded length +#> 12 Johnson 15.5 16.0 16.0 Expanded length +#> 13 Jones 16.0 16.5 16.2 Expanded length +#> 14 Jones 16.0 16.5 16.1 Expanded length #> 15 Frank 16.0 16.5 16.2 Expanded length -#> 16 Frank 16.0 16.5 16.0 Expanded length -#> 17 Frank 16.0 16.5 16.5 Expanded length - -# none need expansion -( d3 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), - lwr.bin=c(15,15.5,16,16,17.1,17.3), - upr.bin=c(15,15.5,16,16,17.1,17.3), - freq=c(6,4,2,3,1,1)) ) +#> 16 Frank 16.0 16.5 16.5 Expanded length +#> 17 Frank 16.0 16.5 16.1 Expanded length + +# none need expansion +( d3 <- 
data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), + lwr.bin=c(15,15.5,16,16,17.1,17.3), + upr.bin=c(15,15.5,16,16,17.1,17.3), + freq=c(6,4,2,3,1,1)) ) #> name lwr.bin upr.bin freq #> 1 Johnson 15.0 15.0 6 #> 2 Johnson 15.5 15.5 4 @@ -240,7 +260,7 @@

    Examples

    #> 4 Frank 16.0 16.0 3 #> 5 Frank 17.1 17.1 1 #> 6 Max 17.3 17.3 1 -expandCounts(d3,~freq) +expandCounts(d3,~freq) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -262,7 +282,7 @@

    Examples

    #> 15 Frank 16.0 16.0 #> 16 Frank 16.0 16.0 #> 17 Frank 16.0 16.0 -expandCounts(d3,~freq,~lwr.bin+upr.bin) +expandCounts(d3,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -284,12 +304,12 @@

    Examples

    #> 15 Frank 16.0 16.0 16.0 Observed length #> 16 Frank 16.0 16.0 16.0 Observed length #> 17 Frank 16.0 16.0 16.0 Observed length - -# some need expansion, but different bin widths -( d4 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), - lwr.bin=c(15, 15, 16, 16, 17.1,17.3), - upr.bin=c(15.5,15.9,16.5,16.9,17.1,17.3), - freq=c(6,4,2,3,1,1)) ) + +# some need expansion, but different bin widths +( d4 <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), + lwr.bin=c(15, 15, 16, 16, 17.1,17.3), + upr.bin=c(15.5,15.9,16.5,16.9,17.1,17.3), + freq=c(6,4,2,3,1,1)) ) #> name lwr.bin upr.bin freq #> 1 Johnson 15.0 15.5 6 #> 2 Johnson 15.0 15.9 4 @@ -297,7 +317,7 @@

    Examples

    #> 4 Frank 16.0 16.9 3 #> 5 Frank 17.1 17.1 1 #> 6 Max 17.3 17.3 1 -expandCounts(d4,~freq) +expandCounts(d4,~freq) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -319,34 +339,34 @@

    Examples

    #> 15 Frank 16.0 16.9 #> 16 Frank 16.0 16.9 #> 17 Frank 16.0 16.9 -expandCounts(d4,~freq,~lwr.bin+upr.bin) +expandCounts(d4,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. #> name lwr.bin upr.bin newlen lennote #> 1 Frank 17.1 17.1 17.1 Observed length #> 2 Max 17.3 17.3 17.3 Observed length -#> 3 Johnson 15.0 15.5 15.1 Expanded length -#> 4 Johnson 15.0 15.5 15.2 Expanded length -#> 5 Johnson 15.0 15.5 15.5 Expanded length +#> 3 Johnson 15.0 15.5 15.0 Expanded length +#> 4 Johnson 15.0 15.5 15.5 Expanded length +#> 5 Johnson 15.0 15.5 15.2 Expanded length #> 6 Johnson 15.0 15.5 15.1 Expanded length -#> 7 Johnson 15.0 15.5 15.5 Expanded length -#> 8 Johnson 15.0 15.5 15.3 Expanded length -#> 9 Johnson 15.0 15.9 15.8 Expanded length -#> 10 Johnson 15.0 15.9 15.1 Expanded length -#> 11 Johnson 15.0 15.9 15.0 Expanded length -#> 12 Johnson 15.0 15.9 15.2 Expanded length -#> 13 Jones 16.0 16.5 16.3 Expanded length -#> 14 Jones 16.0 16.5 16.0 Expanded length -#> 15 Frank 16.0 16.9 16.5 Expanded length -#> 16 Frank 16.0 16.9 16.2 Expanded length +#> 7 Johnson 15.0 15.5 15.1 Expanded length +#> 8 Johnson 15.0 15.5 15.2 Expanded length +#> 9 Johnson 15.0 15.9 15.1 Expanded length +#> 10 Johnson 15.0 15.9 15.9 Expanded length +#> 11 Johnson 15.0 15.9 15.6 Expanded length +#> 12 Johnson 15.0 15.9 15.6 Expanded length +#> 13 Jones 16.0 16.5 16.4 Expanded length +#> 14 Jones 16.0 16.5 16.3 Expanded length +#> 15 Frank 16.0 16.9 16.0 Expanded length +#> 16 Frank 16.0 16.9 16.9 Expanded length #> 17 Frank 16.0 16.9 16.9 Expanded length - -# some need expansion but include zeros and NAs for counts -( d2a <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max","Max","Max","Max"), - lwr.bin=c(15, 15.5,16 ,16 ,17.1,17.3,NA,NA,NA), - upr.bin=c(15.5,16 ,16.5,16.5,17.1,17.3,NA,NA,NA), - freq=c(6,4,2,3,1,1,NA,0,NA)) ) + +# 
some need expansion but include zeros and NAs for counts +( d2a <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max","Max","Max","Max"), + lwr.bin=c(15, 15.5,16 ,16 ,17.1,17.3,NA,NA,NA), + upr.bin=c(15.5,16 ,16.5,16.5,17.1,17.3,NA,NA,NA), + freq=c(6,4,2,3,1,1,NA,0,NA)) ) #> name lwr.bin upr.bin freq #> 1 Johnson 15.0 15.5 6 #> 2 Johnson 15.5 16.0 4 @@ -357,7 +377,7 @@

    Examples

    #> 7 Max NA NA NA #> 8 Max NA NA 0 #> 9 Max NA NA NA -expandCounts(d2a,~freq,~lwr.bin+upr.bin) +expandCounts(d2a,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> Rows 7, 8, 9 had zero or no counts in freq. #> 2 rows had an individual measurement. @@ -368,27 +388,27 @@

    Examples

    #> 3 Max NA NA NA Observed length #> 4 Frank 17.1 17.1 17.1 Observed length #> 5 Max 17.3 17.3 17.3 Observed length -#> 6 Johnson 15.0 15.5 15.1 Expanded length +#> 6 Johnson 15.0 15.5 15.5 Expanded length #> 7 Johnson 15.0 15.5 15.5 Expanded length -#> 8 Johnson 15.0 15.5 15.3 Expanded length -#> 9 Johnson 15.0 15.5 15.3 Expanded length +#> 8 Johnson 15.0 15.5 15.2 Expanded length +#> 9 Johnson 15.0 15.5 15.4 Expanded length #> 10 Johnson 15.0 15.5 15.4 Expanded length -#> 11 Johnson 15.0 15.5 15.2 Expanded length -#> 12 Johnson 15.5 16.0 15.5 Expanded length +#> 11 Johnson 15.0 15.5 15.4 Expanded length +#> 12 Johnson 15.5 16.0 15.6 Expanded length #> 13 Johnson 15.5 16.0 15.6 Expanded length -#> 14 Johnson 15.5 16.0 16.0 Expanded length -#> 15 Johnson 15.5 16.0 15.9 Expanded length -#> 16 Jones 16.0 16.5 16.2 Expanded length -#> 17 Jones 16.0 16.5 16.0 Expanded length -#> 18 Frank 16.0 16.5 16.0 Expanded length -#> 19 Frank 16.0 16.5 16.0 Expanded length -#> 20 Frank 16.0 16.5 16.5 Expanded length - -# some need expansion but include NAs for upper values -( d2b <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), - lwr.bin=c(15, 15.5,16 ,16 ,17.1,17.3), - upr.bin=c(NA ,NA ,16.5,16.5,17.1,17.3), - freq=c(6,4,2,3,1,1)) ) +#> 14 Johnson 15.5 16.0 15.8 Expanded length +#> 15 Johnson 15.5 16.0 15.7 Expanded length +#> 16 Jones 16.0 16.5 16.1 Expanded length +#> 17 Jones 16.0 16.5 16.5 Expanded length +#> 18 Frank 16.0 16.5 16.2 Expanded length +#> 19 Frank 16.0 16.5 16.3 Expanded length +#> 20 Frank 16.0 16.5 16.4 Expanded length + +# some need expansion but include NAs for upper values +( d2b <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), + lwr.bin=c(15, 15.5,16 ,16 ,17.1,17.3), + upr.bin=c(NA ,NA ,16.5,16.5,17.1,17.3), + freq=c(6,4,2,3,1,1)) ) #> name lwr.bin upr.bin freq #> 1 Johnson 15.0 NA 6 #> 2 Johnson 15.5 NA 4 @@ -396,7 +416,7 @@

    Examples

    #> 4 Frank 16.0 16.5 3 #> 5 Frank 17.1 17.1 1 #> 6 Max 17.3 17.3 1 -expandCounts(d2b,~freq,~lwr.bin+upr.bin) +expandCounts(d2b,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -414,16 +434,16 @@

    Examples

    #> 11 Johnson 15.5 15.5 15.5 Observed length #> 12 Johnson 15.5 15.5 15.5 Observed length #> 13 Jones 16.0 16.5 16.0 Expanded length -#> 14 Jones 16.0 16.5 16.4 Expanded length -#> 15 Frank 16.0 16.5 16.3 Expanded length -#> 16 Frank 16.0 16.5 16.4 Expanded length -#> 17 Frank 16.0 16.5 16.0 Expanded length - -# some need expansion but include NAs for upper values -( d2c <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), - lwr.bin=c(NA,NA, 16 ,16 ,17.1,17.3), - upr.bin=c(15,15.5,16.5,16.5,17.1,17.3), - freq=c(6,4,2,3,1,1)) ) +#> 14 Jones 16.0 16.5 16.3 Expanded length +#> 15 Frank 16.0 16.5 16.2 Expanded length +#> 16 Frank 16.0 16.5 16.2 Expanded length +#> 17 Frank 16.0 16.5 16.4 Expanded length + +# some need expansion but include NAs for upper values +( d2c <- data.frame(name=c("Johnson","Johnson","Jones","Frank","Frank","Max"), + lwr.bin=c(NA,NA, 16 ,16 ,17.1,17.3), + upr.bin=c(15,15.5,16.5,16.5,17.1,17.3), + freq=c(6,4,2,3,1,1)) ) #> name lwr.bin upr.bin freq #> 1 Johnson NA 15.0 6 #> 2 Johnson NA 15.5 4 @@ -431,7 +451,7 @@

    Examples

    #> 4 Frank 16.0 16.5 3 #> 5 Frank 17.1 17.1 1 #> 6 Max 17.3 17.3 1 -expandCounts(d2c,~freq,~lwr.bin+upr.bin) +expandCounts(d2c,~freq,~lwr.bin+upr.bin) #> Results messages from expandCounts(): #> 2 rows had an individual measurement. #> 4 rows with multiple measurements were expanded to 15 rows of individual measurements. @@ -448,56 +468,52 @@

    Examples

    #> 10 Johnson 15.5 15.5 15.5 Observed length #> 11 Johnson 15.5 15.5 15.5 Observed length #> 12 Johnson 15.5 15.5 15.5 Observed length -#> 13 Jones 16.0 16.5 16.4 Expanded length -#> 14 Jones 16.0 16.5 16.5 Expanded length +#> 13 Jones 16.0 16.5 16.0 Expanded length +#> 14 Jones 16.0 16.5 16.4 Expanded length #> 15 Frank 16.0 16.5 16.5 Expanded length -#> 16 Frank 16.0 16.5 16.2 Expanded length -#> 17 Frank 16.0 16.5 16.3 Expanded length - -if (FALSE) { -##!!##!!## Change path to where example file is and then run to demo - -## Read in datafile (note periods in names) -df <- read.csv("c:/aaawork/consulting/R_WiDNR/Statewide/Surveysummaries2010.csv") -str(df) -## narrow variables for simplicity -df1 <- df[,c("County","Waterbody.Name","Survey.Year","Gear","Species", - "Number.of.Fish","Length.or.Lower.Length.IN","Length.Upper.IN", - "Weight.Pounds","Gender")] -## Sum the count to see how many fish there should be after expansion -sum(df1$Number.of.Fish) - -## Simple expansion -df2 <- expandCounts(df1,~Number.of.Fish) - -## Same expansion but include random component to lengths (thus new variable) -## also note default lprec=0.1 -df3 <- expandCounts(df1,~Number.of.Fish,~Length.or.Lower.Length.IN+Length.Upper.IN) - -} - +#> 16 Frank 16.0 16.5 16.0 Expanded length +#> 17 Frank 16.0 16.5 16.0 Expanded length + +if (FALSE) { +##!!##!!## Change path to where example file is and then run to demo + +## Read in datafile (note periods in names) +df <- read.csv("c:/aaawork/consulting/R_WiDNR/Statewide/Surveysummaries2010.csv") +str(df) +## narrow variables for simplicity +df1 <- df[,c("County","Waterbody.Name","Survey.Year","Gear","Species", + "Number.of.Fish","Length.or.Lower.Length.IN","Length.Upper.IN", + "Weight.Pounds","Gender")] +## Sum the count to see how many fish there should be after expansion +sum(df1$Number.of.Fish) + +## Simple expansion +df2 <- expandCounts(df1,~Number.of.Fish) + +## Same expansion but include random component to lengths (thus new variable) 
+## also note default lprec=0.1 +df3 <- expandCounts(df1,~Number.of.Fish,~Length.or.Lower.Length.IN+Length.Upper.IN) + +} +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/expandLenFreq.html b/docs/reference/expandLenFreq.html index d36afb39..3d4de942 100644 --- a/docs/reference/expandLenFreq.html +++ b/docs/reference/expandLenFreq.html @@ -1,124 +1,142 @@ -Expands a length frequency based on a subsample. — expandLenFreq • FSAExpands a length frequency based on a subsample. — expandLenFreq • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Creates a vector of lengths for the individuals not measured based on the lengths measured in a subsample of individuals.

    -
    -
    expandLenFreq(
    -  x,
    -  w,
    -  additional,
    -  startcat = NULL,
    -  total = additional + length(x),
    -  decimals = decs$wdec,
    -  show.summary = TRUE,
    -  ...
    -)
    +
    +

    Usage

    +
    expandLenFreq(
    +  x,
    +  w,
    +  additional,
    +  startcat = NULL,
    +  total = additional + length(x),
    +  decimals = decs$wdec,
    +  show.summary = TRUE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector of length measurements.

    + +
    w

    A number that indicates the width of length classes to create.

    + +
    additional

    The number of individuals that were not measured in the sample (for which measurements should be determined).

    + +
    startcat

    A number that indicates the beginning of the first length-class.

    + +
    total

    The total number of individuals in the sample (including those that were measured in the subsample).

    + +
    decimals

    A number that indicates the number of decimals used in the output vector of estimated lengths.

    + +
    show.summary

    A logical that indicates whether a summary of the process should be shown at the end.

    + +
    ...

    Optional arguments to be passed to lencat.

    +
    -
    -

    Value

    -

    Returns a vector that consists of measurements for the non-measured individuals in the entire sample.

    +
    +

    Value

    + + +

    Returns a vector that consists of measurements for the non-measured individuals in the entire sample.

    -
    -

    Details

    +
    +

    Details

    Creates a vector of lengths for the individuals not measured based on the lengths measured in a subsample of individuals. Length categories are created first that begin with the value in startcat (or the minimum observed value by default) and continue by values of w until a category value greater than the largest observed length in x. Categories of different widths are not allowed.

    The resulting “expanded” lengths are created by allocating individuals to each length class based on the proportion of measured individuals in the subsample in that length class. Individuals within a length class are then assigned a specific length within that length class based on a uniform distribution. Because the expanded number of individuals in a length class is rounded down based on the measured number per length class, not all individuals will initially be assigned a length value. The remaining individuals are assigned to a length class randomly according to weights based on the proportion of individuals in the measured length classes. Finally, these individuals are randomly assigned a specific length within the respective length class from a uniform distribution, same as above.

    The resulting length assignments are rounded to the number of decimals shown in decimal. If decimals is not set by the user then it will default to the same number of decimals shown in the w value. Care is taken to make sure that the rounded result will not pass out of the given length category (i.e., will not be allowed to round up to the next length category). Generally speaking, one will want to use more decimals then is shown in w. For example, one may want to create length categories with a width of 1 inch (i.e., w=1) but have the results recorded as if measured to within 0.1 inch (i.e., decimals=1).

    -
    -

    See also

    +
    +

    See also

    See expandCounts for expanding more than just lengths or expanding lengths when there is a known number in each length bin. See lencat for creating length bins.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Set the random seed for reproducibility
    -set.seed(15343437)
    -
    -## First example
    -# random lengths measured to nearest 0.1 unit -- values in a vector
    -len1 <- round(runif(50,0.1,9.9),1)
    -# assignment of integer lengths to 110 non-measured individuals
    -new.len1a <- expandLenFreq(len1,w=1,total=160)
    +    
    +

    Examples

    +
    ## Set the random seed for reproducibility
    +set.seed(15343437)
    +
    +## First example
    +# random lengths measured to nearest 0.1 unit -- values in a vector
    +len1 <- round(runif(50,0.1,9.9),1)
    +# assignment of integer lengths to 110 non-measured individuals
    +new.len1a <- expandLenFreq(len1,w=1,total=160)
     #> Length Frequency Expansion using:
     #>  Measured length frequency of 50 individuals:
     #>    0    1    2    3    4    5    6    7    8    9 
    @@ -131,12 +149,12 @@ 

    Examples

    #> With final length frequency table of: #> 0 1 2 3 4 5 6 7 8 9 #> 8 6 4 8 17 11 15 4 20 17 -new.len1a +new.len1a #> [1] 0 0 0 0 0 0 0 0 1 1 1 1 1 1 2 2 2 2 3 3 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 4 #> [38] 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5 5 5 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 7 7 7 7 8 #> [75] 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 -# assignment of lengths to 0.1 to 110 non-measured individuals -new.len1b <- expandLenFreq(len1,w=1,total=160,decimals=1) +# assignment of lengths to 0.1 to 110 non-measured individuals +new.len1b <- expandLenFreq(len1,w=1,total=160,decimals=1) #> Length Frequency Expansion using: #> Measured length frequency of 50 individuals: #> 0 1 2 3 4 5 6 7 8 9 @@ -149,7 +167,7 @@

    Examples

    #> With final length frequency table of: #> 0 1 2 3 4 5 6 7 8 9 #> 8 6 5 8 16 11 16 4 19 17 -new.len1b +new.len1b #> [1] 0.1 0.1 0.2 0.3 0.3 0.3 0.5 0.9 1.3 1.4 1.5 1.5 1.6 1.6 2.1 2.1 2.3 2.6 #> [19] 2.8 3.0 3.1 3.3 3.5 3.5 3.7 3.8 3.9 4.0 4.1 4.2 4.3 4.3 4.4 4.4 4.5 4.6 #> [37] 4.7 4.8 4.8 4.8 4.8 4.8 4.9 5.0 5.1 5.3 5.4 5.6 5.6 5.8 5.8 5.8 5.8 5.9 @@ -157,12 +175,12 @@

    Examples

    #> [73] 7.6 7.6 8.0 8.0 8.0 8.0 8.1 8.2 8.3 8.3 8.3 8.3 8.3 8.4 8.4 8.6 8.7 8.7 #> [91] 8.8 8.8 8.8 9.0 9.2 9.2 9.2 9.2 9.2 9.3 9.3 9.4 9.5 9.6 9.6 9.6 9.8 9.8 #> [109] 9.8 9.8 - -## Second example -- if values are in a data.frame -# random lengths measured to nearest 0.1 unit -len2 <- data.frame(len=round(runif(50,10,117),1)) -# assignment of lengths to 0.1 for 140 non-measured indivs -new.len2a <- expandLenFreq(len2$len,w=10,total=190,decimals=1) + +## Second example -- if values are in a data.frame +# random lengths measured to nearest 0.1 unit +len2 <- data.frame(len=round(runif(50,10,117),1)) +# assignment of lengths to 0.1 for 140 non-measured indivs +new.len2a <- expandLenFreq(len2$len,w=10,total=190,decimals=1) #> Length Frequency Expansion using: #> Measured length frequency of 50 individuals: #> 10 20 30 40 50 60 70 80 90 100 110 @@ -175,7 +193,7 @@

    Examples

    #> With final length frequency table of: #> 10 20 30 40 50 60 70 80 90 100 110 #> 8 8 8 20 6 15 18 20 16 5 16 -new.len2a +new.len2a #> [1] 10.1 10.7 12.2 15.9 16.2 16.3 17.8 18.2 24.2 25.0 25.4 25.6 #> [13] 27.0 27.5 29.1 29.9 31.2 31.9 31.9 32.9 33.5 36.0 38.0 38.6 #> [25] 41.0 41.8 42.5 42.7 42.7 42.8 43.1 43.3 43.4 44.0 44.2 44.2 @@ -188,12 +206,12 @@

    Examples

    #> [109] 92.5 93.7 93.8 94.6 94.7 95.2 95.3 95.9 96.2 98.6 99.5 103.6 #> [121] 104.6 104.9 106.0 109.5 110.4 111.1 111.4 111.7 112.5 112.6 113.9 114.1 #> [133] 114.1 114.5 115.2 116.8 117.1 118.6 119.3 119.7 - -## Third example -# hypothetically measured lengths -len <- c(6.7,6.9,7.3,7.4,7.5,8.2,8.7,8.9) -# find lengths for unmeasured fish assuming a total of 30 -newlen1 <- expandLenFreq(len,w=0.5,total=30,decimals=1) + +## Third example +# hypothetically measured lengths +len <- c(6.7,6.9,7.3,7.4,7.5,8.2,8.7,8.9) +# find lengths for unmeasured fish assuming a total of 30 +newlen1 <- expandLenFreq(len,w=0.5,total=30,decimals=1) #> Length Frequency Expansion using: #> Measured length frequency of 8 individuals: #> 6.5 7 7.5 8 8.5 @@ -206,11 +224,11 @@

    Examples

    #> With final length frequency table of: #> 6.5 7 7.5 8 8.5 #> 6 5 2 4 5 -newlen1 +newlen1 #> [1] 6.6 6.7 6.7 6.7 6.8 6.9 7.1 7.1 7.2 7.3 7.3 7.5 7.7 8.0 8.2 8.3 8.3 8.5 8.6 #> [20] 8.7 8.8 8.8 -# set a starting length category -newlen2 <- expandLenFreq(len,w=0.5,startcat=6.2,total=30,decimals=1) +# set a starting length category +newlen2 <- expandLenFreq(len,w=0.5,startcat=6.2,total=30,decimals=1) #> Length Frequency Expansion using: #> Measured length frequency of 8 individuals: #> 6.7 7.2 8.2 8.7 @@ -223,32 +241,28 @@

    Examples

    #> With final length frequency table of: #> 6.7 7.2 8.2 8.7 #> 6 9 2 5 -newlen2 +newlen2 #> [1] 6.7 6.7 6.9 6.9 6.9 6.9 7.2 7.3 7.3 7.3 7.4 7.4 7.4 7.6 7.6 8.4 8.5 8.7 8.8 #> [20] 9.0 9.0 9.1 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/extraTests.html b/docs/reference/extraTests.html index 7924c82d..748bc6c3 100644 --- a/docs/reference/extraTests.html +++ b/docs/reference/extraTests.html @@ -1,90 +1,106 @@ -Likelihood ratio and extra sum-of-squares tests. — extraTests • FSALikelihood ratio and extra sum-of-squares tests. — extraTests • FSA + + Skip to contents -
    -
    -
    - +
    +
    +
    -
    +

    Likelihood ratio and extra sum-of-squares tests with multiple lm or nls models nested within one common model. This function is most useful when the nested functions are all at the same level; otherwise use anova() or lrtest() which are more flexible.

    -
    -
    lrt(sim, ..., com, sim.names = sim.name, sim.name = NULL, com.name = NULL)
    -
    -extraSS(sim, ..., com, sim.names = sim.name, sim.name = NULL, com.name = NULL)
    -
    -# S3 method for extraTest
    -print(x, ...)
    +
    +

    Usage

    +
    lrt(sim, ..., com, sim.names = sim.name, sim.name = NULL, com.name = NULL)
    +
    +extraSS(sim, ..., com, sim.names = sim.name, sim.name = NULL, com.name = NULL)
    +
    +# S3 method for extraTest
    +print(x, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    sim

    The results of one lm or nls model, for example, that is a nested subset of the model in com=.

    + +
    ...

    More model results that are nested subsets of the model in com=.

    + +
    com

    The results of one lm or nls model, for example, that the models in sim= and ... are a subset of.

    + +
    sim.name, sim.names

    A string vector of “names” for simple model in sim= and .... sim.names is preferred but sim.name is allowed to allow for a common typing mistake.

    + +
    com.name

    A single “name” string for the complex model in com=.

    + +
    x

    An object from lrt() or extraSS().

    +
    -
    -

    Value

    -

    The main function returns a matrix with as many rows as model comparisons and columns of the following types:

    • DfO The error degrees-of-freedom from the subset (more simple) model.

    • +
      +

      Value

      + + +

      The main function returns a matrix with as many rows as model comparisons and columns of the following types:

      +

      +
      • DfO The error degrees-of-freedom from the subset (more simple) model.

      • RSSO, logLikO The residual sum-of-squares (from extraSS) or log-likelihood (from lrt) from the subset (more simple) model.

      • DfA The error degrees-of-freedom from the com= model.

      • RSSA, logLikA The residual sum-of-squares (from extraSS) or log-likelihood (from lrt) from the com= model.

      • @@ -93,34 +109,34 @@

        Value

      • F, Chisq The corresponding F- (from extraSS) or Chi-square (from lrt) test statistic.

      • Pr(>F), Pr(>Chisq) The corresponding p-value.

      -
      -

      Details

      +
      +

      Details

      anova and lrtest (from lmtest) provide simple methods for conducting extra sum-of-squares or likelihood ratio tests when one model is nested within another model or when there are several layers of simple models all sequentially nested within each other. However, to compare several models that are nested at the same level with one common more complex model, then anova() and lrtest() must be repeated for each comparison. This repetition can be eliminated with lapply() but then the output is voluminous. This function is designed to remove the repetitiveness and to provide output that is compact and easy to read.

      -
      -

      Note

      +
      +

      Note

      This function is experimental at this point. It seems to work fine for lm and nls models. An error will be thrown by extraSS for other model classes, but lrt will not (but it has not been thoroughly tests for other models).

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## Example data
      -df <- data.frame(x=c(1,2,3,4,5,6,7,8,9,10),
      -                 y=c(4,6,5,7,9,8,7,12,16,22),
      -                 z=as.factor(rep(c("A","B"),each=5)),
      -                 w=as.factor(rep(c("A","B"),times=5)))
      -df$x2 <- df$x^2
      -
      -## Linear (lm()) models
      -#  ... regression
      -fit.0 <- lm(y~1,data=df)
      -fit.1 <- lm(y~x,data=df)
      -fit.2 <- lm(y~x2+x,data=df)
      -extraSS(fit.0,fit.1,com=fit.2)
      +    
      +

      Examples

      +
      ## Example data
      +df <- data.frame(x=c(1,2,3,4,5,6,7,8,9,10),
      +                 y=c(4,6,5,7,9,8,7,12,16,22),
      +                 z=as.factor(rep(c("A","B"),each=5)),
      +                 w=as.factor(rep(c("A","B"),times=5)))
      +df$x2 <- df$x^2
      +
      +## Linear (lm()) models
      +#  ... regression
      +fit.0 <- lm(y~1,data=df)
      +fit.1 <- lm(y~x,data=df)
      +fit.2 <- lm(y~x2+x,data=df)
      +extraSS(fit.0,fit.1,com=fit.2)
       #> Model 1: y ~ 1
       #> Model 2: y ~ x
       #> Model A: y ~ x2 + x 
      @@ -130,7 +146,7 @@ 

      Examples

      #> 2vA 8 67.988 7 27.617 1 40.371 10.233 0.0150891 * #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -lrt(fit.0,fit.1,com=fit.2) +lrt(fit.0,fit.1,com=fit.2) #> Loading required namespace: lmtest #> Model 1: y ~ 1 #> Model 2: y ~ x @@ -141,10 +157,10 @@

      Examples

      #> 2vA 8 -23.7731 7 -19.2686 1 -4.5045 9.0091 0.002686 ** #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - -# ... show labels for models -extraSS(fit.0,fit.1,com=fit.2, - sim.names=c("Null Model","Linear"),com.name="Quadratic") + +# ... show labels for models +extraSS(fit.0,fit.1,com=fit.2, + sim.names=c("Null Model","Linear"),com.name="Quadratic") #> Model 1: Null Model #> Model 2: Linear #> Model A: Quadratic @@ -154,8 +170,8 @@

      Examples

      #> 2vA 8 67.988 7 27.617 1 40.371 10.233 0.0150891 * #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -lrt(fit.0,fit.1,com=fit.2, - sim.names=c("Null Model","Linear"),com.name="Quadratic") +lrt(fit.0,fit.1,com=fit.2, + sim.names=c("Null Model","Linear"),com.name="Quadratic") #> Model 1: Null Model #> Model 2: Linear #> Model A: Quadratic @@ -165,10 +181,10 @@

      Examples

      #> 2vA 8 -23.7731 7 -19.2686 1 -4.5045 9.0091 0.002686 ** #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - -# ... dummy variable regression -fit.2b <- lm(y~x*z,data=df) -extraSS(fit.0,fit.1,com=fit.2b) + +# ... dummy variable regression +fit.2b <- lm(y~x*z,data=df) +extraSS(fit.0,fit.1,com=fit.2b) #> Model 1: y ~ 1 #> Model 2: y ~ x #> Model A: y ~ x * z @@ -178,7 +194,7 @@

      Examples

      #> 2vA 8 67.988 6 17.800 2 50.188 8.4586 0.0179459 * #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -lrt(fit.0,fit.1,com=fit.2b) +lrt(fit.0,fit.1,com=fit.2b) #> Model 1: y ~ 1 #> Model 2: y ~ x #> Model A: y ~ x * z @@ -188,11 +204,11 @@

      Examples

      #> 2vA 8 -23.7731 6 -17.0725 2 -6.7007 13.401 0.00123 ** #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - -# ... ANOVAs -fit.1 <- lm(y~w,data=df) -fit.2 <- lm(y~w*z,data=df) -extraSS(fit.0,fit.1,com=fit.2) + +# ... ANOVAs +fit.1 <- lm(y~w,data=df) +fit.2 <- lm(y~w*z,data=df) +extraSS(fit.0,fit.1,com=fit.2) #> Model 1: y ~ 1 #> Model 2: y ~ w #> Model A: y ~ w * z @@ -200,7 +216,7 @@

      Examples

      #> DfO RSSO DfA RSSA Df SS F Pr(>F) #> 1vA 9 282.4 6 159.0 3 123.4 1.5522 0.2955 #> 2vA 8 262.8 6 159.0 2 103.8 1.9585 0.2215 -lrt(fit.0,fit.1,com=fit.2) +lrt(fit.0,fit.1,com=fit.2) #> Model 1: y ~ 1 #> Model 2: y ~ w #> Model A: y ~ w * z @@ -210,13 +226,13 @@

      Examples

      #> 2vA 8 -30.5334 6 -28.0210 2 -2.5124 5.0249 0.08107 . #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - - -## Non-linear (nls()) models -fit.0 = nls(y~c,data=df,start=list(c=10)) -fit.1 = nls(y~a*x+c,data=df,start=list(a=1,c=1)) -fit.2 = nls(y~b*x2+a*x+c,data=df,start=list(a=-1,b=0.3,c=10)) -extraSS(fit.0,fit.1,com=fit.2) + + +## Non-linear (nls()) models +fit.0 = nls(y~c,data=df,start=list(c=10)) +fit.1 = nls(y~a*x+c,data=df,start=list(a=1,c=1)) +fit.2 = nls(y~b*x2+a*x+c,data=df,start=list(a=-1,b=0.3,c=10)) +extraSS(fit.0,fit.1,com=fit.2) #> Model 1: y ~ c #> Model 2: y ~ a * x + c #> Model A: y ~ b * x2 + a * x + c @@ -226,7 +242,7 @@

      Examples

      #> 2vA 8 67.988 7 27.617 1 40.371 10.233 0.0150891 * #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -lrt(fit.0,fit.1,com=fit.2) +lrt(fit.0,fit.1,com=fit.2) #> Model 1: y ~ c #> Model 2: y ~ a * x + c #> Model A: y ~ b * x2 + a * x + c @@ -236,40 +252,36 @@

      Examples

      #> 2vA 8 -23.7731 7 -19.2686 1 -4.5045 9.0091 0.002686 ** #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - -## General least-squares (gls()) models -if (FALSE) { - require(nlme) - fit.0 <- gls(y~1,data=df,method="ML") - fit.1 <- gls(y~x,data=df,method="ML") - fit.2 <- gls(y~x2+x,data=df,method="ML") - lrt(fit.0,fit.1, com=fit.2) - ## will return an error ... does not work with gls() models - # extraSS(fit.0,fit.1, com=fit.2) -} - + +## General least-squares (gls()) models +if (FALSE) { + require(nlme) + fit.0 <- gls(y~1,data=df,method="ML") + fit.1 <- gls(y~x,data=df,method="ML") + fit.2 <- gls(y~x2+x,data=df,method="ML") + lrt(fit.0,fit.1, com=fit.2) + ## will return an error ... does not work with gls() models + # extraSS(fit.0,fit.1, com=fit.2) +} +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/fact2num.html b/docs/reference/fact2num.html index 2e1271e3..624a579e 100644 --- a/docs/reference/fact2num.html +++ b/docs/reference/fact2num.html @@ -1,121 +1,121 @@ -Converts "numeric" factor levels to numeric values. — fact2num • FSAConverts "numeric" factor levels to numeric values. — fact2num • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Converts “numeric” factor levels to numeric values.

    -
    -
    fact2num(object)
    +
    +

    Usage

    +
    fact2num(object)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    A vector with “numeric” factor levels to be converted to numeric values.

    +
    -
    -

    Value

    -

    A numeric vector.

    +
    +

    Value

    + + +

    A numeric vector.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    junk <- factor(c(1,7,2,4,3,10))
    -str(junk)
    +    
    +

    Examples

    +
    junk <- factor(c(1,7,2,4,3,10))
    +str(junk)
     #>  Factor w/ 6 levels "1","2","3","4",..: 1 5 2 4 3 6
    -junk2 <- fact2num(junk)
    -str(junk2)
    +junk2 <- fact2num(junk)
    +str(junk2)
     #>  num [1:6] 1 7 2 4 3 10
    -
    -## ONLY RUN IN INTERACTIVE MODE
    -if (interactive()) {
    -
    -bad <- factor(c("A","B","C"))
    -# This will result in an error -- levels are not 'numeric'
    -bad2 <- fact2num(bad)
    -
    -}  ## END IF INTERACTIVE MODE
    -
    +
    +## ONLY RUN IN INTERACTIVE MODE
    +if (interactive()) {
    +
    +bad <- factor(c("A","B","C"))
    +# This will result in an error -- levels are not 'numeric'
    +bad2 <- fact2num(bad)
    +
    +}  ## END IF INTERACTIVE MODE
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/fishR.html b/docs/reference/fishR.html index b7010dbd..f90e25e8 100644 --- a/docs/reference/fishR.html +++ b/docs/reference/fishR.html @@ -1,120 +1,123 @@ -Opens web pages associated with the fishR website. — fishR • FSAOpens web pages associated with the fishR website. — fishR • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    -

    Opens web pages associated with the fishR website in a browser. The user can open the main page or choose a specific page to open.

    +
    +

    Opens web pages associated with the fishR website in a browser. The user can open the main page or choose a specific page to open.

    -
    -
    fishR(
    -  where = c("home", "IFAR", "general", "books", "AIFFD", "posts", "news"),
    -  open = TRUE
    -)
    +
    +

    Usage

    +
    fishR(
    +  where = c("home", "posts", "books", "IFAR", "AIFFD", "packages", "data"),
    +  open = TRUE
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    where

    A string that indicates a particular page on the fishR website to open.

    + +
    open

    A logical that indicates whether the webpage should be opened in the default browser. Defaults to TRUE; FALSE is used for unit testing.

    +
    -
    -

    Value

    -

    None, but a webpage will be opened in the default browser.

    +
    +

    Value

    + + +

    None, but a webpage will be opened in the default browser.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    if (FALSE) {
    -## Opens an external webpage ... only run interactively
    -fishR()            # home page
    -fishR("IFAR")      # Introduction to Fisheries Analysis with R page
    -fishR("general")   # examples page
    -fishR("books")     # examples page
    -fishR("AIFFD")     # Analysis & Interpretation of Freshw. Fisher. Data page
    -fishR("posts")     # blog posts (some examples) page
    -}
    -
    +    
    +

    Examples

    +
    if (FALSE) {
    +## Opens an external webpage ... only run interactively
    +fishR()            # home page
    +fishR("posts")     # blog posts (some examples) page
    +fishR("books")     # examples page
    +fishR("IFAR")      # Introduction to Fisheries Analysis with R page
    +fishR("AIFFD")     # Analysis & Interpretation of Freshw. Fisher. Data page
    +fishR("packages")  # list of r-related fishereis packages
    +fishR("data")      # list of fisheries data sets
    +}
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/geomean.html b/docs/reference/geomean.html index ae34b482..01c7b2af 100644 --- a/docs/reference/geomean.html +++ b/docs/reference/geomean.html @@ -1,138 +1,142 @@ -Calculates the geometric mean or geometric standard deviation. — geomean • FSACalculates the geometric mean or geometric standard deviation. — geomean • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Calculates the geometric mean or standard deviation of a vector of numeric values.

    -
    -
    geomean(x, na.rm = FALSE, zneg.rm = FALSE)
    -
    -geosd(x, na.rm = FALSE, zneg.rm = FALSE)
    +
    +

    Usage

    +
    geomean(x, na.rm = FALSE, zneg.rm = FALSE)
    +
    +geosd(x, na.rm = FALSE, zneg.rm = FALSE)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    Vector of numeric values.

    + +
    na.rm

    Logical indicating whether to remove missing values or not.

    + +
    zneg.rm

    Logical indicating whether to ignore or remove zero or negative values found in x.

    +
    -
    -

    Value

    -

    A numeric value that is the geometric mean or geometric standard deviation of the numeric values in x.

    +
    +

    Value

    + + +

    A numeric value that is the geometric mean or geometric standard deviation of the numeric values in x.

    -
    -

    Details

    +
    +

    Details

    The geometric mean is computed by log transforming the raw data in x, computing the arithmetic mean of the transformed data, and back-transforming this mean to the geometric mean by exponentiating.

    The geometric standard deviation is computed by log transforming the raw data in x, computing the arithmetic standard deviation of the transformed data, and back-transforming this standard deviation to the geometric standard deviation by exponentiating.

    -
    -

    Note

    +
    +

    Note

    This function is largely an implementation of the code suggested by Russell Senior on R-help in November, 1999.

    -
    -

    See also

    +
    +

    See also

    See geometric.mean in psych and Gmean for geometric mean calculators. See Gsd (documented with Gmean) from DescTools for geometric standard deviation calculators.

    -
    -

    Examples

    -
    ## generate random lognormal data
    -d <- rlnorm(500,meanlog=0,sdlog=1)
    -# d has a mean on log scale of 0; thus, gm should be exp(0)~=1
    -# d has a sd on log scale of 1; thus, gsd should be exp(1)~=2.7
    -geomean(d)
    +    
    +

    Examples

    +
    ## generate random lognormal data
    +d <- rlnorm(500,meanlog=0,sdlog=1)
    +# d has a mean on log scale of 0; thus, gm should be exp(0)~=1
    +# d has a sd on log scale of 1; thus, gsd should be exp(1)~=2.7
    +geomean(d)
     #> [1] 0.934946
    -geosd(d)
    +geosd(d)
     #> [1] 2.663484
    -
    -if (FALSE) {
    -## Demonstrate handling of zeros and negative values
    -x <- seq(-1,5)
    -# this will given an error
    -geomean(x)
    -# this will only give a warning, but might not be what you want
    -geomean(x,zneg.rm=TRUE)
    -}
    -
    +
    +if (FALSE) {
    +## Demonstrate handling of zeros and negative values
    +x <- seq(-1,5)
    +# this will given an error
    +geomean(x)
    +# this will only give a warning, but might not be what you want
    +geomean(x,zneg.rm=TRUE)
    +}
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/growthModels.html b/docs/reference/growthModels.html index 8c99b52d..51b6b9b7 100644 --- a/docs/reference/growthModels.html +++ b/docs/reference/growthModels.html @@ -1,123 +1,141 @@ -Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. — growthModels • FSACreates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. — growthModels • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Creates a function for a specific parameterizations of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. Use growthFunShow() to see the equations for each growth function.

    -
    -
    vbFuns(
    -  param = c("Typical", "typical", "Traditional", "traditional", "BevertonHolt",
    -    "Original", "original", "vonBertalanffy", "GQ", "GallucciQuinn", "Mooij", "Weisberg",
    -    "Ogle", "Schnute", "Francis", "Laslett", "Polacheck", "Somers", "Somers2", "Pauly",
    -    "Fabens", "Fabens2", "Wang", "Wang2", "Wang3", "Francis2", "Francis3"),
    -  simple = FALSE,
    -  msg = FALSE
    -)
    -
    -GompertzFuns(
    -  param = c("Ricker1", "Ricker2", "Ricker3", "QuinnDeriso1", "QuinnDeriso2",
    -    "QuinnDeriso3", "QD1", "QD2", "QD3", "Original", "original", "Troynikov1",
    -    "Troynikov2"),
    -  simple = FALSE,
    -  msg = FALSE
    -)
    -
    -RichardsFuns(param = 1, simple = FALSE, msg = FALSE)
    -
    -logisticFuns(
    -  param = c("CJ1", "CJ2", "Karkach", "Haddon", "CampanaJones1", "CampanaJones2"),
    -  simple = FALSE,
    -  msg = FALSE
    -)
    -
    -growthFunShow(
    -  type = c("vonBertalanffy", "Gompertz", "Richards", "Logistic", "Schnute",
    -    "SchnuteRichards"),
    -  param = NULL,
    -  case = param,
    -  plot = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    vbFuns(
    +  param = c("Typical", "typical", "Traditional", "traditional", "BevertonHolt",
    +    "Original", "original", "vonBertalanffy", "GQ", "GallucciQuinn", "Mooij", "Weisberg",
    +    "Ogle", "Schnute", "Francis", "Laslett", "Polacheck", "Somers", "Somers2", "Pauly",
    +    "Fabens", "Fabens2", "Wang", "Wang2", "Wang3", "Francis2", "Francis3"),
    +  simple = FALSE,
    +  msg = FALSE
    +)
    +
    +GompertzFuns(
    +  param = c("Ricker1", "Ricker2", "Ricker3", "QuinnDeriso1", "QuinnDeriso2",
    +    "QuinnDeriso3", "QD1", "QD2", "QD3", "Original", "original", "Troynikov1",
    +    "Troynikov2"),
    +  simple = FALSE,
    +  msg = FALSE
    +)
    +
    +RichardsFuns(param = 1, simple = FALSE, msg = FALSE)
    +
    +logisticFuns(
    +  param = c("CJ1", "CJ2", "Karkach", "Haddon", "CampanaJones1", "CampanaJones2"),
    +  simple = FALSE,
    +  msg = FALSE
    +)
    +
    +growthFunShow(
    +  type = c("vonBertalanffy", "Gompertz", "Richards", "Logistic", "Schnute",
    +    "SchnuteRichards"),
    +  param = NULL,
    +  case = param,
    +  plot = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    param

    A string (for von Bertalanffy, Gompertz, and logistic) or numeric (for Richards) that indicates the specific parameterization of the growth function. See details.

    + +
    simple

    A logical that indicates whether the function will accept all parameter values in the first parameter argument (=FALSE; DEFAULT) or whether all individual parameters must be specified in separate arguments (=TRUE).

    + +
    msg

    A logical that indicates whether a message about the growth function and parameter definitions should be output (=TRUE) or not (=FALSE; DEFAULT).

    + +
    type

    A string (in growthFunShow) that indicates the type of growth function to show.

    + +
    case

    A numeric that indicates the specific case of the Schnute function to use. See details.

    + +
    plot

    A logical that indicates whether the growth function expression should be shown as an equation in a simple plot.

    + +
    ...

    Not implemented.

    +
    -
    -

    Value

    -

    The functions ending in xxxFuns return a function that can be used to predict fish size given a vector of ages and values for the growth function parameters and, in some parameterizations, values for constants. The result should be saved to an object that is then the function name. When the resulting function is used, the parameters are ordered as shown when the definitions of the parameters are printed after the function is called (if msg=TRUE). If simple=FALSE (DEFAULT), then the values for all parameters may be included as a vector in the first parameter argument (but in the same order). Similarly, the values for all constants may be included as a vector in the first constant argument (i.e., t1). If simple=TRUE, then all parameters and constants must be declared individually. The resulting function is somewhat easier to read when simple=TRUE, but is less general for some applications. -An expression of the equation for each growth function may be created with growthFunShow. In this function type= is used to select the major function type (e.g., von Bertalanffy, Gompertz, Richards, Logistic, Schnute) and param= is used to select a specific parameterization of that growth function. If plot=TRUE, then a simple graphic will be created with the equation using plotmath for a pretty format.

    +
    +

    Value

    + + +

    The functions ending in xxxFuns return a function that can be used to predict fish size given a vector of ages and values for the growth function parameters and, in some parameterizations, values for constants. The result should be saved to an object that is then the function name. When the resulting function is used, the parameters are ordered as shown when the definitions of the parameters are printed after the function is called (if msg=TRUE). If simple=FALSE (DEFAULT), then the values for all parameters may be included as a vector in the first parameter argument (but in the same order). Similarly, the values for all constants may be included as a vector in the first constant argument (i.e., t1). If simple=TRUE, then all parameters and constants must be declared individually. The resulting function is somewhat easier to read when simple=TRUE, but is less general for some applications.

    + + +

    An expression of the equation for each growth function may be created with growthFunShow. In this function type= is used to select the major function type (e.g., von Bertalanffy, Gompertz, Richards, Logistic, Schnute) and param= is used to select a specific parameterization of that growth function. If plot=TRUE, then a simple graphic will be created with the equation using plotmath for a pretty format.

    -
    -

    Note

    +
    +

    Note

    Take note of the following for parameterizations (i.e., param) of each growth function:

    • von Bertalanffy

      • The ‘Original’ and ‘vonBertalanffy’ are synonymous as are ‘Typical’, ‘Traditional’, and ‘BevertonHolt’. Further note that the ‘Ogle’ parameterization has the ‘Original’/‘vonBertalanffy’ and ‘Typical’/‘Traditional’/‘BevertonHolt’ parameterizations as special cases.

    • Gompertz

      • The ‘Ricker2’ and ‘QuinnDeriso1’ are synonymous, as are ‘Ricker3’ and ‘QuinnDeriso2’.

      • @@ -135,13 +153,13 @@

        Note

      • logistic

        • Within FSA, L0 is the mean length at age 0, Linf is the mean asymptotic length, ti is the age at the inflection point, and gninf is the instantaneous growth rate at negative infinity.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    12-Individual Growth.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Campana, S.E. and C.M. Jones. 1992. Analysis of otolith microstructure data. Pages 73-100 In D.K. Stevenson and S.E. Campana, editors. Otolith microstructure examination and analysis. Canadian Special Publication of Fisheries and Aquatic Sciences 117. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/141734.pdf.]

    Fabens, A. 1965. Properties and fitting of the von Bertalanffy growth curve. Growth 29:265-289.

    Francis, R.I.C.C. 1988. Are growth parameters estimated from tagging and age-length data comparable? Canadian Journal of Fisheries and Aquatic Sciences, 45:936-942.

    @@ -167,20 +185,20 @@

    References

    Weisberg, S., G.R. Spangler, and L. S. Richmond. 2010. Mixed effects models for fish growth. Canadian Journal of Fisheries And Aquatic Sciences 67:269-277.

    Winsor, C.P. 1932. The Gompertz curve as a growth curve. Proceedings of the National Academy of Sciences. 18:1-8. [Was (is?) from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1076153/pdf/pnas01729-0009.pdf.]

    -
    -

    See also

    +
    +

    See also

    See Schnute for an implementation of the Schnute (1981) model.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com, thanks to Gabor Grothendieck for a hint about using get().

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com, thanks to Gabor Grothendieck for a hint about using get().

    -
    -

    Examples

    -
    ###########################################################
    -## Simple Examples -- Von B
    -( vb1 <- vbFuns() )
    +    
    +

    Examples

    +
    ###########################################################
    +## Simple Examples -- Von B
    +( vb1 <- vbFuns() )
     #> function (t, Linf, K = NULL, t0 = NULL) 
     #> {
     #>     if (length(Linf) == 3) {
    @@ -190,12 +208,12 @@ 

    Examples

    #> } #> Linf * (1 - exp(-K * (t - t0))) #> } -#> <bytecode: 0x0000000023377c98> -#> <environment: 0x00000000221f1718> -ages <- 0:20 -plot(vb1(ages,Linf=20,K=0.3,t0=-0.2)~ages,type="b",pch=19) +#> <bytecode: 0x000002105fba4d48> +#> <environment: 0x000002105fbcbe48> +ages <- 0:20 +plot(vb1(ages,Linf=20,K=0.3,t0=-0.2)~ages,type="b",pch=19) -( vb2 <- vbFuns("Francis") ) +( vb2 <- vbFuns("Francis") ) #> function (t, L1, L2 = NULL, L3 = NULL, t1, t3 = NULL) #> { #> if (length(L1) == 3) { @@ -211,22 +229,22 @@

    Examples

    #> L1 + (L3 - L1) * ((1 - r^(2 * ((t - t1)/(t3 - t1))))/(1 - #> r^2)) #> } -#> <bytecode: 0x0000000021fd6358> -#> <environment: 0x0000000022342380> -plot(vb2(ages,L1=10,L2=19,L3=20,t1=2,t3=18)~ages,type="b",pch=19) +#> <bytecode: 0x000002105fbad888> +#> <environment: 0x000002105fd31f38> +plot(vb2(ages,L1=10,L2=19,L3=20,t1=2,t3=18)~ages,type="b",pch=19) -( vb2c <- vbFuns("Francis",simple=TRUE) ) # compare to vb2 +( vb2c <- vbFuns("Francis",simple=TRUE) ) # compare to vb2 #> function (t, L1, L2, L3, t1, t3) #> { #> r <- (L3 - L2)/(L2 - L1) #> L1 + (L3 - L1) * ((1 - r^(2 * ((t - t1)/(t3 - t1))))/(1 - #> r^2)) #> } -#> <bytecode: 0x000000002335ed58> -#> <environment: 0x000000002208b100> - -## Simple Examples -- Gompertz -( gomp1 <- GompertzFuns() ) +#> <bytecode: 0x000002105fbb9a00> +#> <environment: 0x000002105fe28d38> + +## Simple Examples -- Gompertz +( gomp1 <- GompertzFuns() ) #> function (t, Linf, gi = NULL, ti = NULL) #> { #> if (length(Linf) == 3) { @@ -236,11 +254,11 @@

    Examples

    #> } #> Linf * exp(-exp(-gi * (t - ti))) #> } -#> <bytecode: 0x0000000021e817a0> -#> <environment: 0x0000000021e75998> -plot(gomp1(ages,Linf=800,gi=0.5,ti=5)~ages,type="b",pch=19) +#> <bytecode: 0x000002105fe99ad0> +#> <environment: 0x000002105febb3c8> +plot(gomp1(ages,Linf=800,gi=0.5,ti=5)~ages,type="b",pch=19) -( gomp2 <- GompertzFuns("Ricker2") ) +( gomp2 <- GompertzFuns("Ricker2") ) #> function (t, L0, a = NULL, gi = NULL) #> { #> if (length(L0) == 3) { @@ -250,18 +268,18 @@

    Examples

    #> } #> L0 * exp(a * (1 - exp(-gi * t))) #> } -#> <bytecode: 0x0000000021e7ab98> -#> <environment: 0x0000000021918ef0> -plot(gomp2(ages,L0=2,a=6,gi=0.5)~ages,type="b",pch=19) +#> <bytecode: 0x000002105fe987c8> +#> <environment: 0x000002105fff2bf0> +plot(gomp2(ages,L0=2,a=6,gi=0.5)~ages,type="b",pch=19) -( gomp2c <- GompertzFuns("Ricker2",simple=TRUE) ) # compare to gomp2 +( gomp2c <- GompertzFuns("Ricker2",simple=TRUE) ) # compare to gomp2 #> function (t, L0, a, gi) #> { #> L0 * exp(a * (1 - exp(-gi * t))) #> } -#> <bytecode: 0x0000000021e7b960> -#> <environment: 0x00000000213c26a8> -( gompT <- GompertzFuns("Troynikov1")) +#> <bytecode: 0x000002105fea9a70> +#> <environment: 0x0000021060118300> +( gompT <- GompertzFuns("Troynikov1")) #> function (Lm, dt, Linf, gi = NULL) #> { #> if (length(Linf) == 2) { @@ -270,11 +288,11 @@

    Examples

    #> } #> Linf * ((Lm/Linf)^exp(-gi * dt)) - Lm #> } -#> <bytecode: 0x0000000021e76d20> -#> <environment: 0x00000000210d5210> - -## Simple Examples -- Richards -( rich1 <- RichardsFuns() ) +#> <bytecode: 0x000002105fe9aa40> +#> <environment: 0x00000210601954b8> + +## Simple Examples -- Richards +( rich1 <- RichardsFuns() ) #> function (t, Linf, k = NULL, a = NULL, b = NULL) #> { #> if (length(Linf) == 4) { @@ -285,11 +303,11 @@

    Examples

    #> } #> Linf * (1 - a * exp(-k * t))^b #> } -#> <bytecode: 0x0000000020efe080> -#> <environment: 0x00000000206aed38> -plot(rich1(ages,Linf=800,k=0.5,a=1,b=6)~ages,type="b",pch=19) +#> <bytecode: 0x00000210602287a0> +#> <environment: 0x00000210602828f8> +plot(rich1(ages,Linf=800,k=0.5,a=1,b=6)~ages,type="b",pch=19) -( rich2 <- RichardsFuns(2) ) +( rich2 <- RichardsFuns(2) ) #> function (t, Linf, k = NULL, ti = NULL, b = NULL) #> { #> if (length(Linf) == 4) { @@ -300,11 +318,11 @@

    Examples

    #> } #> Linf * (1 - (1/b) * exp(-k * (t - ti)))^b #> } -#> <bytecode: 0x0000000020ed16c8> -#> <environment: 0x000000001fe49f20> -plot(rich2(ages,Linf=800,k=0.5,ti=3,b=6)~ages,type="b",pch=19) +#> <bytecode: 0x0000021060220d80> +#> <environment: 0x00000210607a7a60> +plot(rich2(ages,Linf=800,k=0.5,ti=3,b=6)~ages,type="b",pch=19) -( rich3 <- RichardsFuns(3) ) +( rich3 <- RichardsFuns(3) ) #> function (t, Linf, k = NULL, ti = NULL, b = NULL) #> { #> if (length(Linf) == 4) { @@ -315,11 +333,11 @@

    Examples

    #> } #> Linf/((1 + b * exp(-k * (t - ti)))^(1/b)) #> } -#> <bytecode: 0x0000000020ece1b8> -#> <environment: 0x000000001f902610> -plot(rich3(ages,Linf=800,k=0.5,ti=3,b=0.15)~ages,type="b",pch=19) +#> <bytecode: 0x000002106022d278> +#> <environment: 0x0000021060932480> +plot(rich3(ages,Linf=800,k=0.5,ti=3,b=0.15)~ages,type="b",pch=19) -( rich4 <- RichardsFuns(4) ) +( rich4 <- RichardsFuns(4) ) #> function (t, Linf, k = NULL, ti = NULL, b = NULL) #> { #> if (length(Linf) == 4) { @@ -330,12 +348,12 @@

    Examples

    #> } #> Linf * (1 + (b - 1) * exp(-k * (t - ti)))^(1/(1 - b)) #> } -#> <bytecode: 0x0000000020ec09f0> -#> <environment: 0x000000001f6593a8> -plot(rich4(ages,Linf=800,k=0.5,ti=3,b=0.95)~ages,type="b",pch=19) -lines(rich4(ages,Linf=800,k=0.5,ti=3,b=1.5)~ages,type="b",pch=19,col="blue") +#> <bytecode: 0x000002106021e088> +#> <environment: 0x0000021060a2b550> +plot(rich4(ages,Linf=800,k=0.5,ti=3,b=0.95)~ages,type="b",pch=19) +lines(rich4(ages,Linf=800,k=0.5,ti=3,b=1.5)~ages,type="b",pch=19,col="blue") -( rich5 <- RichardsFuns(5) ) +( rich5 <- RichardsFuns(5) ) #> function (t, Linf, k = NULL, L0 = NULL, b = NULL) #> { #> if (length(Linf) == 4) { @@ -347,11 +365,11 @@

    Examples

    #> Linf * (1 + (((L0/Linf)^(1 - b)) - 1) * exp(-k * t))^(1/(1 - #> b)) #> } -#> <bytecode: 0x00000000206b9898> -#> <environment: 0x000000001f346f08> -plot(rich5(ages,Linf=800,k=0.5,L0=50,b=1.5)~ages,type="b",pch=19) +#> <bytecode: 0x0000021060239c78> +#> <environment: 0x0000021060bb3f78> +plot(rich5(ages,Linf=800,k=0.5,L0=50,b=1.5)~ages,type="b",pch=19) -( rich6 <- RichardsFuns(6) ) +( rich6 <- RichardsFuns(6) ) #> function (t, Linf, k = NULL, ti = NULL, Lninf = NULL, b = NULL) #> { #> if (length(Linf) == 5) { @@ -364,20 +382,20 @@

    Examples

    #> Lninf + (Linf - Lninf) * (1 + (b - 1) * exp(-k * (t - ti)))^(1/(1 - #> b)) #> } -#> <bytecode: 0x00000000206afd08> -#> <environment: 0x000000000a9b4150> -plot(rich6(ages,Linf=800,k=0.5,ti=3,Lninf=50,b=1.5)~ages,type="b",pch=19) +#> <bytecode: 0x00000210602562f0> +#> <environment: 0x0000021060cb1278> +plot(rich6(ages,Linf=800,k=0.5,ti=3,Lninf=50,b=1.5)~ages,type="b",pch=19) -( rich2c <- RichardsFuns(2,simple=TRUE) ) # compare to rich2 +( rich2c <- RichardsFuns(2,simple=TRUE) ) # compare to rich2 #> function (t, Linf, k, ti, b) #> { #> Linf * (1 - (1/b) * exp(-k * (t - ti)))^b #> } -#> <bytecode: 0x0000000020ecea18> -#> <environment: 0x000000000a5d5660> - -## Simple Examples -- Logistic -( log1 <- logisticFuns() ) +#> <bytecode: 0x000002106022dba8> +#> <environment: 0x0000021060de43a0> + +## Simple Examples -- Logistic +( log1 <- logisticFuns() ) #> function (t, Linf, gninf = NULL, ti = NULL) #> { #> if (length(Linf) == 3) { @@ -387,11 +405,11 @@

    Examples

    #> } #> Linf/(1 + exp(-gninf * (t - ti))) #> } -#> <bytecode: 0x000000000a4f8628> -#> <environment: 0x000000000a4a9748> -plot(log1(ages,Linf=800,gninf=0.5,ti=5)~ages,type="b",pch=19) +#> <bytecode: 0x0000021060e649a0> +#> <environment: 0x0000021060e6f0b8> +plot(log1(ages,Linf=800,gninf=0.5,ti=5)~ages,type="b",pch=19) -( log2 <- logisticFuns("CJ2") ) +( log2 <- logisticFuns("CJ2") ) #> function (t, Linf, gninf = NULL, a = NULL) #> { #> if (length(Linf) == 3) { @@ -401,18 +419,18 @@

    Examples

    #> } #> Linf/(1 + a * exp(-gninf * t)) #> } -#> <bytecode: 0x000000000a4f63c0> -#> <environment: 0x0000000009861da8> -plot(log2(ages,Linf=800,gninf=0.5,a=10)~ages,type="b",pch=19) +#> <bytecode: 0x0000021060e61588> +#> <environment: 0x0000021060f701a8> +plot(log2(ages,Linf=800,gninf=0.5,a=10)~ages,type="b",pch=19) -( log2c <- logisticFuns("CJ2",simple=TRUE) ) # compare to log2 +( log2c <- logisticFuns("CJ2",simple=TRUE) ) # compare to log2 #> function (t, Linf, gninf, a) #> { #> Linf/(1 + a * exp(-gninf * t)) #> } -#> <bytecode: 0x000000000a4f76c8> -#> <environment: 0x0000000007884d80> -( log3 <- logisticFuns("Karkach") ) +#> <bytecode: 0x0000021060e60910> +#> <environment: 0x00000210610cf738> +( log3 <- logisticFuns("Karkach") ) #> function (t, Linf, L0 = NULL, gninf = NULL) #> { #> if (length(Linf) == 3) { @@ -422,11 +440,11 @@

    Examples

    #> } #> L0 * Linf/(L0 + (Linf - L0) * exp(-gninf * t)) #> } -#> <bytecode: 0x000000000a4f7d20> -#> <environment: 0x00000000076ddf00> -plot(log3(ages,L0=10,Linf=800,gninf=0.5)~ages,type="b",pch=19) +#> <bytecode: 0x0000021060e60360> +#> <environment: 0x00000210611991f0> +plot(log3(ages,L0=10,Linf=800,gninf=0.5)~ages,type="b",pch=19) -( log4 <- logisticFuns("Haddon") ) +( log4 <- logisticFuns("Haddon") ) #> function (Lm, dLmax, L50 = NULL, L95 = NULL) #> { #> if (length(dLmax) == 3) { @@ -436,24 +454,24 @@

    Examples

    #> } #> dLmax/(1 + exp(log(19) * ((Lm - L50)/(L95 - L50)))) #> } -#> <bytecode: 0x000000000a4f3b60> -#> <environment: 0x0000000021c11750> - - -########################################################### -## Examples of fitting -## After the last example a plot is constructed with three -## or four lines on top of each other illustrating that the -## parameterizations all produce the same fitted values. -## However, observe the correlations in the summary() results. - -## Von B -plot(tl~age,data=SpotVA1,pch=19) - -# Fitting the typical parameterization of the von B function -fit1 <- nls(tl~vb1(age,Linf,K,t0),data=SpotVA1, - start=vbStarts(tl~age,data=SpotVA1)) -summary(fit1,correlation=TRUE) +#> <bytecode: 0x0000021060e72d20> +#> <environment: 0x000002106129ba48> + + +########################################################### +## Examples of fitting +## After the last example a plot is constructed with three +## or four lines on top of each other illustrating that the +## parameterizations all produce the same fitted values. +## However, observe the correlations in the summary() results. + +## Von B +plot(tl~age,data=SpotVA1,pch=19) + +# Fitting the typical parameterization of the von B function +fit1 <- nls(tl~vb1(age,Linf,K,t0),data=SpotVA1, + start=vbStarts(tl~age,data=SpotVA1)) +summary(fit1,correlation=TRUE) #> #> Formula: tl ~ vb1(age, Linf, K, t0) #> @@ -475,12 +493,12 @@

    Examples

    #> Number of iterations to convergence: 10 #> Achieved convergence tolerance: 5.49e-06 #> -curve(vb1(x,Linf=coef(fit1)),from=0,to=5,col="red",lwd=10,add=TRUE) - -# Fitting the Francis parameterization of the von B function -fit2 <- nls(tl~vb2c(age,L1,L2,L3,t1=0,t3=5),data=SpotVA1, - start=vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5))) -summary(fit2,correlation=TRUE) +curve(vb1(x,Linf=coef(fit1)),from=0,to=5,col="red",lwd=10,add=TRUE) + +# Fitting the Francis parameterization of the von B function +fit2 <- nls(tl~vb2c(age,L1,L2,L3,t1=0,t3=5),data=SpotVA1, + start=vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5))) +summary(fit2,correlation=TRUE) #> #> Formula: tl ~ vb2c(age, L1, L2, L3, t1 = 0, t3 = 5) #> @@ -502,14 +520,14 @@

    Examples

    #> Number of iterations to convergence: 7 #> Achieved convergence tolerance: 9.455e-06 #> -curve(vb2c(x,L1=coef(fit2)[1],L2=coef(fit2)[2],L3=coef(fit2)[3],t1=0,t3=5), - from=0,to=5,col="blue",lwd=5,add=TRUE) - -# Fitting the Schnute parameterization of the von B function -vb3 <- vbFuns("Schnute") -fit3 <- nls(tl~vb3(age,L1,L3,K,t1=0,t3=4),data=SpotVA1, - start=vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,4))) -summary(fit3,correlation=TRUE) +curve(vb2c(x,L1=coef(fit2)[1],L2=coef(fit2)[2],L3=coef(fit2)[3],t1=0,t3=5), + from=0,to=5,col="blue",lwd=5,add=TRUE) + +# Fitting the Schnute parameterization of the von B function +vb3 <- vbFuns("Schnute") +fit3 <- nls(tl~vb3(age,L1,L3,K,t1=0,t3=4),data=SpotVA1, + start=vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,4))) +summary(fit3,correlation=TRUE) #> #> Formula: tl ~ vb3(age, L1, L3, K, t1 = 0, t3 = 4) #> @@ -531,29 +549,29 @@

    Examples

    #> Number of iterations to convergence: 8 #> Achieved convergence tolerance: 8.156e-06 #> -curve(vb3(x,L1=coef(fit3),t1=c(0,4)),from=0,to=5,col="green",lwd=2,add=TRUE) +curve(vb3(x,L1=coef(fit3),t1=c(0,4)),from=0,to=5,col="green",lwd=2,add=TRUE) - -## Gompertz -# Make some fake data using the original parameterization -gompO <- GompertzFuns("original") -# setup ages, sample sizes (general reduction in numbers with -# increasing age), and additive SD to model -t <- 1:15 -n <- c(10,40,35,25,12,10,10,8,6,5,3,3,3,2,2) -sd <- 15 -# expand ages -ages <- rep(t,n) -# get lengths from gompertz and a random error for individuals -lens <- gompO(ages,Linf=450,a=1,gi=0.3)+rnorm(length(ages),0,sd) -# put together as a data.frame -df <- data.frame(age=ages,len=round(lens,0)) - -plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5)) - -# Fit first Ricker parameterization -fit1 <- nls(len~gomp1(age,Linf,gi,ti),data=df,start=list(Linf=500,gi=0.3,ti=3)) -summary(fit1,correlation=TRUE) + +## Gompertz +# Make some fake data using the original parameterization +gompO <- GompertzFuns("original") +# setup ages, sample sizes (general reduction in numbers with +# increasing age), and additive SD to model +t <- 1:15 +n <- c(10,40,35,25,12,10,10,8,6,5,3,3,3,2,2) +sd <- 15 +# expand ages +ages <- rep(t,n) +# get lengths from gompertz and a random error for individuals +lens <- gompO(ages,Linf=450,a=1,gi=0.3)+rnorm(length(ages),0,sd) +# put together as a data.frame +df <- data.frame(age=ages,len=round(lens,0)) + +plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5)) + +# Fit first Ricker parameterization +fit1 <- nls(len~gomp1(age,Linf,gi,ti),data=df,start=list(Linf=500,gi=0.3,ti=3)) +summary(fit1,correlation=TRUE) #> #> Formula: len ~ gomp1(age, Linf, gi, ti) #> @@ -575,11 +593,11 @@

    Examples

    #> Number of iterations to convergence: 3 #> Achieved convergence tolerance: 5.463e-07 #> -curve(gomp1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE) - -# Fit third Ricker parameterization -fit2 <- nls(len~gomp2(age,L0,a,gi),data=df,start=list(L0=30,a=3,gi=0.3)) -summary(fit2,correlation=TRUE) +curve(gomp1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE) + +# Fit third Ricker parameterization +fit2 <- nls(len~gomp2(age,L0,a,gi),data=df,start=list(L0=30,a=3,gi=0.3)) +summary(fit2,correlation=TRUE) #> #> Formula: len ~ gomp2(age, L0, a, gi) #> @@ -601,12 +619,12 @@

    Examples

    #> Number of iterations to convergence: 3 #> Achieved convergence tolerance: 9.415e-06 #> -curve(gomp2(x,L0=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE) - -# Fit third Quinn and Deriso parameterization (using simple=TRUE model) -gomp3 <- GompertzFuns("QD3",simple=TRUE) -fit3 <- nls(len~gomp3(age,Linf,gi,t0),data=df,start=list(Linf=500,gi=0.3,t0=0)) -summary(fit3,correlation=TRUE) +curve(gomp2(x,L0=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE) + +# Fit third Quinn and Deriso parameterization (using simple=TRUE model) +gomp3 <- GompertzFuns("QD3",simple=TRUE) +fit3 <- nls(len~gomp3(age,Linf,gi,t0),data=df,start=list(Linf=500,gi=0.3,t0=0)) +summary(fit3,correlation=TRUE) #> #> Formula: len ~ gomp3(age, Linf, gi, t0) #> @@ -628,32 +646,32 @@

    Examples

    #> Number of iterations to convergence: 4 #> Achieved convergence tolerance: 4.607e-07 #> -curve(gomp3(x,Linf=coef(fit3)[1],gi=coef(fit3)[2],t0=coef(fit3)[3]), - from=0,to=15,col="green",lwd=2,add=TRUE) +curve(gomp3(x,Linf=coef(fit3)[1],gi=coef(fit3)[2],t0=coef(fit3)[3]), + from=0,to=15,col="green",lwd=2,add=TRUE) - -## Richards - -if (FALSE) { -# Fit first Richards parameterization ... DOES NOT CONVERGE -fit1 <- nls(len~rich1(age,Linf,k,a,b),data=df, - start=list(Linf=450,k=0.3,a=0.2,b=3)) -summary(fit1,correlation=TRUE) -curve(rich1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE) - -# Fit second Richards parameterization ... DOES NOT CONVERGE -fit2 <- nls(len~rich2(age,Linf,k,ti,b),data=df, - start=list(Linf=450,k=0.25,ti=3,b=3)) -summary(fit2,correlation=TRUE) -curve(rich2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=7,add=TRUE) -} - -plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5)) - -# Fit third Richards parameterization -fit3 <- nls(len~rich3(age,Linf,k,ti,b),data=df, - start=list(Linf=450,k=0.25,ti=3,b=-0.1)) -summary(fit3,correlation=TRUE) + +## Richards + +if (FALSE) { +# Fit first Richards parameterization ... DOES NOT CONVERGE +fit1 <- nls(len~rich1(age,Linf,k,a,b),data=df, + start=list(Linf=450,k=0.3,a=0.2,b=3)) +summary(fit1,correlation=TRUE) +curve(rich1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE) + +# Fit second Richards parameterization ... DOES NOT CONVERGE +fit2 <- nls(len~rich2(age,Linf,k,ti,b),data=df, + start=list(Linf=450,k=0.25,ti=3,b=3)) +summary(fit2,correlation=TRUE) +curve(rich2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=7,add=TRUE) +} + +plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5)) + +# Fit third Richards parameterization +fit3 <- nls(len~rich3(age,Linf,k,ti,b),data=df, + start=list(Linf=450,k=0.25,ti=3,b=-0.1)) +summary(fit3,correlation=TRUE) #> #> Formula: len ~ rich3(age, Linf, k, ti, b) #> @@ -677,12 +695,12 @@

    Examples

    #> Number of iterations to convergence: 4 #> Achieved convergence tolerance: 8.624e-06 #> -curve(rich3(x,Linf=coef(fit3)),from=0,to=15,col="green",lwd=4,add=TRUE) - -# Fit fourth Richards parameterization -fit4 <- nls(len~rich4(age,Linf,k,ti,b),data=df, - start=list(Linf=450,k=0.25,ti=3,b=0.7)) -summary(fit4,correlation=TRUE) +curve(rich3(x,Linf=coef(fit3)),from=0,to=15,col="green",lwd=4,add=TRUE) + +# Fit fourth Richards parameterization +fit4 <- nls(len~rich4(age,Linf,k,ti,b),data=df, + start=list(Linf=450,k=0.25,ti=3,b=0.7)) +summary(fit4,correlation=TRUE) #> #> Formula: len ~ rich4(age, Linf, k, ti, b) #> @@ -706,16 +724,16 @@

    Examples

    #> Number of iterations to convergence: 4 #> Achieved convergence tolerance: 2.301e-06 #> -curve(rich4(x,Linf=coef(fit4)),from=0,to=15,col="black",lwd=1,add=TRUE) +curve(rich4(x,Linf=coef(fit4)),from=0,to=15,col="black",lwd=1,add=TRUE) - -## Logistic -plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5)) - -# Fit first Campana-Jones parameterization -fit1 <- nls(len~log1(age,Linf,gninf,ti),data=df, - start=list(Linf=450,gninf=0.45,ti=4)) -summary(fit1,correlation=TRUE) + +## Logistic +plot(len~age,data=df,pch=19,col=rgb(0,0,0,1/5)) + +# Fit first Campana-Jones parameterization +fit1 <- nls(len~log1(age,Linf,gninf,ti),data=df, + start=list(Linf=450,gninf=0.45,ti=4)) +summary(fit1,correlation=TRUE) #> #> Formula: len ~ log1(age, Linf, gninf, ti) #> @@ -737,12 +755,12 @@

    Examples

    #> Number of iterations to convergence: 4 #> Achieved convergence tolerance: 7.635e-07 #> -curve(log1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE) - -# Fit second Campana-Jones parameterization -fit2 <- nls(len~log2(age,Linf,gninf,a),data=df, - start=list(Linf=450,gninf=0.45,a=7)) -summary(fit2,correlation=TRUE) +curve(log1(x,Linf=coef(fit1)),from=0,to=15,col="red",lwd=10,add=TRUE) + +# Fit second Campana-Jones parameterization +fit2 <- nls(len~log2(age,Linf,gninf,a),data=df, + start=list(Linf=450,gninf=0.45,a=7)) +summary(fit2,correlation=TRUE) #> #> Formula: len ~ log2(age, Linf, gninf, a) #> @@ -764,13 +782,13 @@

    Examples

    #> Number of iterations to convergence: 4 #> Achieved convergence tolerance: 8.456e-06 #> -curve(log2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE) - -# Fit Karkach parameterization (using simple=TRUE model) -log3 <- logisticFuns("Karkach",simple=TRUE) -fit3 <- nls(len~log3(age,Linf,L0,gninf),data=df, - start=list(Linf=450,L0=30,gninf=0.45)) -summary(fit3,correlation=TRUE) +curve(log2(x,Linf=coef(fit2)),from=0,to=15,col="blue",lwd=5,add=TRUE) + +# Fit Karkach parameterization (using simple=TRUE model) +log3 <- logisticFuns("Karkach",simple=TRUE) +fit3 <- nls(len~log3(age,Linf,L0,gninf),data=df, + start=list(Linf=450,L0=30,gninf=0.45)) +summary(fit3,correlation=TRUE) #> #> Formula: len ~ log3(age, Linf, L0, gninf) #> @@ -792,64 +810,60 @@

    Examples

    #> Number of iterations to convergence: 5 #> Achieved convergence tolerance: 6.738e-06 #> -curve(log3(x,Linf=coef(fit3)[1],L0=coef(fit3)[2],gninf=coef(fit3)[3]), - from=0,to=15,col="green",lwd=2,add=TRUE) +curve(log3(x,Linf=coef(fit3)[1],L0=coef(fit3)[2],gninf=coef(fit3)[3]), + from=0,to=15,col="green",lwd=2,add=TRUE) - - -############################################################################# -## Create expressions of the models -############################################################################# -# Typical von Bertalanffy ... Show as a stand-alone plot -growthFunShow("vonBertalanffy","Typical",plot=TRUE) + + +############################################################################# +## Create expressions of the models +############################################################################# +# Typical von Bertalanffy ... Show as a stand-alone plot +growthFunShow("vonBertalanffy","Typical",plot=TRUE) #> expression(E(L[t]) == L[infinity] * bgroup("(", 1 - e^{ #> -K * (t ~ -~t[0]) #> }, ")")) -# Get and save the expression -( tmp <- growthFunShow("vonBertalanffy","Typical") ) +# Get and save the expression +( tmp <- growthFunShow("vonBertalanffy","Typical") ) #> expression(E(L[t]) == L[infinity] * bgroup("(", 1 - e^{ #> -K * (t ~ -~t[0]) #> }, ")")) -# Use expression as title on a plot -lens <- vb1(ages,Linf=20,K=0.3,t0=-0.2) -plot(lens~ages,type="b",pch=19,main=tmp) -# Put expression in the main plot -text(10,5,tmp) +# Use expression as title on a plot +lens <- vb1(ages,Linf=20,K=0.3,t0=-0.2) +plot(lens~ages,type="b",pch=19,main=tmp) +# Put expression in the main plot +text(10,5,tmp) -# Put multiple expressions on a plot -op <- par(mar=c(0.1,0.1,0.1,0.1)) -plot(0,type="n",xlab="",ylab="",xlim=c(0,1),ylim=c(0,3),xaxt="n",yaxt="n") -text(0,2.5,"Original:",pos=4) -text(0.5,2.5,growthFunShow("vonBertalanffy","Original")) -text(0,1.5,"Typical:",pos=4) -text(0.5,1.5,growthFunShow("vonBertalanffy","Typical")) -text(0,0.5,"Francis:",pos=4) 
-text(0.5,0.5,growthFunShow("vonBertalanffy","Francis")) +# Put multiple expressions on a plot +op <- par(mar=c(0.1,0.1,0.1,0.1)) +plot(0,type="n",xlab="",ylab="",xlim=c(0,1),ylim=c(0,3),xaxt="n",yaxt="n") +text(0,2.5,"Original:",pos=4) +text(0.5,2.5,growthFunShow("vonBertalanffy","Original")) +text(0,1.5,"Typical:",pos=4) +text(0.5,1.5,growthFunShow("vonBertalanffy","Typical")) +text(0,0.5,"Francis:",pos=4) +text(0.5,0.5,growthFunShow("vonBertalanffy","Francis")) -par(op) +par(op)
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/headtail.html b/docs/reference/headtail.html index 0d71b099..401aa094 100644 --- a/docs/reference/headtail.html +++ b/docs/reference/headtail.html @@ -1,100 +1,112 @@ -Shows rows from the head and tail of a data frame or matrix. — headtail • FSAShows rows from the head and tail of a data frame or matrix. — headtail • FSA + + Skip to contents -
    -
    -
    - +
    +
    +
    -
    +

    Shows rows from the head and tail of a data frame or matrix.

    -
    -
    headtail(x, n = 3L, which = NULL, addrownums = TRUE, ...)
    +
    +

    Usage

    +
    headtail(x, n = 3L, which = NULL, addrownums = TRUE, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A data frame or matrix.

    + +
    n

    A single numeric that indicates the number of rows to display from each of the head and tail of structure.

    + +
    which

    A numeric or string vector that contains the column numbers or names to display. Defaults to showing all columns.

    + +
    addrownums

    If there are no row names for the MATRIX, then create them from the row numbers.

    + +
    ...

    Arguments to be passed to or from other methods.

    +
    -
    -

    Value

    -

    A matrix or data.frame with 2*n rows.

    +
    +

    Value

    + + +

    A matrix or data.frame with 2*n rows.

    -
    -

    Note

    +
    +

    Note

    If n is larger than the number of rows in x then all of x is displayed.

    -
    -

    See also

    +
    +

    See also

    peek

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    headtail(iris)
    +    
    +

    Examples

    +
    headtail(iris)
     #>     Sepal.Length Sepal.Width Petal.Length Petal.Width   Species
     #> 1            5.1         3.5          1.4         0.2    setosa
     #> 2            4.9         3.0          1.4         0.2    setosa
    @@ -102,7 +114,7 @@ 

    Examples

    #> 148 6.5 3.0 5.2 2.0 virginica #> 149 6.2 3.4 5.4 2.3 virginica #> 150 5.9 3.0 5.1 1.8 virginica -headtail(iris,10) +headtail(iris,10) #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species #> 1 5.1 3.5 1.4 0.2 setosa #> 2 4.9 3.0 1.4 0.2 setosa @@ -124,7 +136,7 @@

    Examples

    #> 148 6.5 3.0 5.2 2.0 virginica #> 149 6.2 3.4 5.4 2.3 virginica #> 150 5.9 3.0 5.1 1.8 virginica -headtail(iris,which=c("Sepal.Length","Sepal.Width","Species")) +headtail(iris,which=c("Sepal.Length","Sepal.Width","Species")) #> Sepal.Length Sepal.Width Species #> 1 5.1 3.5 setosa #> 2 4.9 3.0 setosa @@ -132,7 +144,7 @@

    Examples

    #> 148 6.5 3.0 virginica #> 149 6.2 3.4 virginica #> 150 5.9 3.0 virginica -headtail(iris,which=grep("Sepal",names(iris))) +headtail(iris,which=grep("Sepal",names(iris))) #> Sepal.Length Sepal.Width #> 1 5.1 3.5 #> 2 4.9 3.0 @@ -140,7 +152,7 @@

    Examples

    #> 148 6.5 3.0 #> 149 6.2 3.4 #> 150 5.9 3.0 -headtail(iris,n=200) +headtail(iris,n=200) #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species #> 1 5.1 3.5 1.4 0.2 setosa #> 2 4.9 3.0 1.4 0.2 setosa @@ -292,10 +304,10 @@

    Examples

    #> 148 6.5 3.0 5.2 2.0 virginica #> 149 6.2 3.4 5.4 2.3 virginica #> 150 5.9 3.0 5.1 1.8 virginica - -## Make a matrix for demonstration purposes only -miris <- as.matrix(iris[,1:4]) -headtail(miris) + +## Make a matrix for demonstration purposes only +miris <- as.matrix(iris[,1:4]) +headtail(miris) #> Sepal.Length Sepal.Width Petal.Length Petal.Width #> [1,] 5.1 3.5 1.4 0.2 #> [2,] 4.9 3.0 1.4 0.2 @@ -303,7 +315,7 @@

    Examples

    #> [148,] 6.5 3.0 5.2 2.0 #> [149,] 6.2 3.4 5.4 2.3 #> [150,] 5.9 3.0 5.1 1.8 -headtail(miris,10) +headtail(miris,10) #> Sepal.Length Sepal.Width Petal.Length Petal.Width #> [1,] 5.1 3.5 1.4 0.2 #> [2,] 4.9 3.0 1.4 0.2 @@ -325,7 +337,7 @@

    Examples

    #> [148,] 6.5 3.0 5.2 2.0 #> [149,] 6.2 3.4 5.4 2.3 #> [150,] 5.9 3.0 5.1 1.8 -headtail(miris,addrownums=FALSE) +headtail(miris,addrownums=FALSE) #> Sepal.Length Sepal.Width Petal.Length Petal.Width #> [1,] 5.1 3.5 1.4 0.2 #> [2,] 4.9 3.0 1.4 0.2 @@ -333,7 +345,7 @@

    Examples

    #> [4,] 6.5 3.0 5.2 2.0 #> [5,] 6.2 3.4 5.4 2.3 #> [6,] 5.9 3.0 5.1 1.8 -headtail(miris,10,which=2:4) +headtail(miris,10,which=2:4) #> Sepal.Width Petal.Length Petal.Width #> [1,] 3.5 1.4 0.2 #> [2,] 3.0 1.4 0.2 @@ -355,18 +367,18 @@

    Examples

    #> [148,] 3.0 5.2 2.0 #> [149,] 3.4 5.4 2.3 #> [150,] 3.0 5.1 1.8 - -## Make a tbl_df type from tibble ... note how headtail() -## is not limited by the tbl_df restriction on number of -## rows to show (but head() is). -if (require(tibble)) { - iris2 <- as_tibble(iris) - class(iris2) - headtail(iris2,n=15) - head(iris2,n=15) -} + +## Make a tbl_df type from tibble ... note how headtail() +## is not limited by the tbl_df restriction on number of +## rows to show (but head() is). +if (require(tibble)) { + iris2 <- as_tibble(iris) + class(iris2) + headtail(iris2,n=15) + head(iris2,n=15) +} #> Loading required package: tibble -#> # A tibble: 15 x 5 +#> # A tibble: 15 × 5 #> Sepal.Length Sepal.Width Petal.Length Petal.Width Species #> <dbl> <dbl> <dbl> <dbl> <fct> #> 1 5.1 3.5 1.4 0.2 setosa @@ -386,26 +398,22 @@

    Examples

    #> 15 5.8 4 1.2 0.2 setosa
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/hist.formula.html b/docs/reference/hist.formula.html index b078a448..f809f16d 100644 --- a/docs/reference/hist.formula.html +++ b/docs/reference/hist.formula.html @@ -1,132 +1,170 @@ -Creates separate histograms by levels. — hist.formula • FSACreates separate histograms by levels. — hist.formula • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Creates separate histograms of a quantitative variable by levels of a factor variable.

    -
    -
    # S3 method for formula
    -hist(
    -  formula,
    -  data = NULL,
    -  main = "",
    -  right = FALSE,
    -  pre.main = "",
    -  xlab = NULL,
    -  ylab = "Frequency",
    -  same.breaks = TRUE,
    -  breaks = "Sturges",
    -  w = NULL,
    -  same.ylim = TRUE,
    -  ymax = NULL,
    -  col = "gray90",
    -  nrow = round(sqrt(num)),
    -  ncol = ceiling(sqrt(num)),
    -  byrow = TRUE,
    -  iaxs = TRUE,
    -  ...
    -)
    +
    +

    Usage

    +
    # S3 method for formula
    +hist(
    +  formula,
    +  data = NULL,
    +  main = "",
    +  right = FALSE,
    +  pre.main = "",
    +  xlab = NULL,
    +  ylab = "Frequency",
    +  same.breaks = TRUE,
    +  breaks = "Sturges",
    +  w = NULL,
    +  same.ylim = TRUE,
    +  ymax = NULL,
    +  col = "gray90",
    +  nrow = round(sqrt(num)),
    +  ncol = ceiling(sqrt(num)),
    +  byrow = TRUE,
    +  iaxs = TRUE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula. See details.

    + +
    data

    An optional data frame that contains the variables in the model.

    + +
    main

    A character string used as the main title for when a SINGLE histogram is produced.

    + +
    right

    A logical that indicates if the histogram bins are right-closed (left open) intervals (=TRUE) or not (=FALSE; default).

    + +
    pre.main

    A character string to be used as a prefix for the main title when multiple histograms are produced. See details.

    + +
    xlab

    A character label for the x-axis. Defaults to name of quantitative variable in formula.

    + +
    ylab

    A character label for the y-axis. Defaults to “Frequency”.

    + +
    same.breaks

    A logical that indicates whether the same break values (i.e., bins) should be used on each histogram. Ignored if breaks or w is provided by the user. Defaults to TRUE.

    + +
    breaks

    A single numeric that indicates the number of bins or breaks or a vector that contains the lower values of the breaks. Ignored if w is not NULL. See hist for more details.

    + +
    w

    A single numeric that indicates the width of the bins to use. The bins will start at “rounded” values depending on the value of w. See lencat for more details.

    + +
    same.ylim

    A logical that indicates whether the same limits for the y-axis should be used on each histogram. Defaults to TRUE.

    + +
    ymax

    A single value that sets the maximum y-axis limit for each histogram or a vector of length equal to the number of groups that sets the maximum y-axis limit for each histogram separately. If NULL (default), then a value will be found.

    + +
    col

    A string that indicates the color for the bars on the histogram. Defaults to a light shade of gray (i.e., "gray90").

    + +
    nrow

    A single numeric that contains the number of rows to use on the graphic.

    + +
    ncol

    A single numeric that contains the number of columns to use on the graphic.

    + +
    byrow

    A single logical that indicates if the histograms should fill rows first (=TRUE or columns first (=FALSE).

    + +
    iaxs

    A single logical that indicates whether both axes should be plotted using xaxs="i" and yaxs="i" (the default) or xaxs="r" and yaxs="r" (what R typically does).

    + +
    ...

    Other arguments to pass through to the default hist().

    +
    -
    -

    Value

    -

    A graphic is produced and nothing is returned unless formula results in only one histogram. In that case, an object of class "histogram" is returned, which is described in hist.

    +
    +

    Value

    + + +

    A graphic is produced and nothing is returned unless formula results in only one histogram. In that case, an object of class "histogram" is returned, which is described in hist.

    -
    -

    Details

    +
    +

    Details

    The formula must be of the form ~quantitative, quantitative~1, quantitative~factor, or quantitative~factor*factor2 where quantitative is the quantitative variable to construct the histograms for and factor or factor2 are factor variables that contain the levels for which separate histograms should be constructed.

    If the formula is of the form ~quantitative or quantitative~1 then only a single histogram of the quantitative variable will be produced. This allows hist.formula() to be used similarly to hist() but with a data= argument.

    The function produces a single (but see below) graphic that consists of a grid on which the separate histograms are printed. The rows and columns of this grid are determined to construct a plot that is as square as possible. However, the rows and columns can be set by the user with the nrow= and ncol= arguments. If the product of the number of rows and number of columns set by the user is less than the total number of histograms to be constructed then multiple pages of histograms will be produced (each requiring the user to click on the graph to go to the next graph). The x-axis of each separate histogram will be labeled identically. The default x-axis label is the name of the quantitative variable. This can be changed by the user with the xlab= argument.

    @@ -134,75 +172,71 @@

    Details

    The default for right= is not the same as that used in hist() from graphics. Thus, right-open (left-closed) bins are the default.

    The iaxs= argument defaults to TRUE so that xaxs="i" and yaxs="i" are used for both axes, which eliminates the “floating” x-axis that R typically plots for histograms.

    -
    -

    Note

    +
    +

    Note

    Students often need to look at the distribution of a quantitative variable separated for different levels of a categorical variable. One method for examining these distributions is with boxplot(quantitative~factor). Other methods use functions in Lattice and ggplots2 but these packages have some learning ‘overhead’ for newbie students. The formula notation, however, is a common way in R to tell R to separate a quantitative variable by the levels of a factor. Thus, this function adds code for formulas to the generic hist function. This allows newbie students to use a common notation (i.e., formula) to easily create multiple histograms of a quantitative variable separated by the levels of a factor.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    3-Plotting Fundamentals.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See base hist for related functionality and multhist in plotrix for similar functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com, but this implementation is largely a modification of the code provided by Marc Schwartz on the R-help mailing list on 1Jun07.

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com, but this implementation is largely a modification of the code provided by Marc Schwartz on the R-help mailing list on 1Jun07.

    -
    -

    Examples

    -
    ## Using the defaults
    -hist(Sepal.Length~Species,data=iris)
    +    
    +

    Examples

    +
    ## Using the defaults
    +hist(Sepal.Length~Species,data=iris)
     
    -
    -## Add x-labels and use a pre-fix on the main labels
    -hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",
    -     pre.main="Species==")
    +
    +## Add x-labels and use a pre-fix on the main labels
    +hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",
    +     pre.main="Species==")
     
    -
    -## Use different breaks and different y-axis limits for each graph
    -hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",
    -     same.breaks=FALSE,same.ylim=FALSE)
    +
    +## Use different breaks and different y-axis limits for each graph
    +hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",
    +     same.breaks=FALSE,same.ylim=FALSE)
     
    -
    -## Single histogram without grouping using formula notation
    -hist(~Sepal.Length,data=iris,xlab="Sepal Length (cm)")
    +
    +## Single histogram without grouping using formula notation
    +hist(~Sepal.Length,data=iris,xlab="Sepal Length (cm)")
     
    -
    -## Using the bin width argument
    -hist(~Sepal.Length,data=iris,xlab="Sepal Length (cm)",w=1)
    +
    +## Using the bin width argument
    +hist(~Sepal.Length,data=iris,xlab="Sepal Length (cm)",w=1)
     
    -hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",w=0.25)
    +hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",w=0.25)
     
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/histFromSum.html b/docs/reference/histFromSum.html index ab359394..42443289 100644 --- a/docs/reference/histFromSum.html +++ b/docs/reference/histFromSum.html @@ -1,128 +1,138 @@ -Create a histogram from a frequency table. — histFromSum • FSACreate a histogram from a frequency table. — histFromSum • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Creates a histogram from values in a frequency table. Primarily used with already summarized length frequency data.

    -
    -
    histFromSum(x, ...)
    -
    -# S3 method for default
    -histFromSum(x, y, ...)
    -
    -# S3 method for table
    -histFromSum(x, ...)
    -
    -# S3 method for formula
    -histFromSum(x, data = NULL, ...)
    +
    +

    Usage

    +
    histFromSum(x, ...)
    +
    +# S3 method for default
    +histFromSum(x, y, ...)
    +
    +# S3 method for table
    +histFromSum(x, ...)
    +
    +# S3 method for formula
    +histFromSum(x, data = NULL, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector of bin/category values, a formula of the form freq~cat where freq contains the count/frequency values and cat contains the bin/category values, an object of class table from table() or xtabs().

    + +
    ...

    Additional arguments for hist.

    + +
    y

    A numeric vector of count/frequency values.

    + +
    data

    A data.frame that contains the freq and cat variables if a formula is given in x.

    +
    -
    -

    Value

    -

    None, but a graphic is created.

    +
    +

    Value

    + + +

    None, but a graphic is created.

    -
    -

    Details

    +
    +

    Details

    Creates a histogram fro values in a frequency table. The frequency table may be constructed from xtabs, table, or be in the form of a matrix or a data.frame (as if read in from an external data file).

    -
    -

    See also

    +
    +

    See also

    See hist and hist.formula for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Make some dummy data with a length category variable
    -set.seed(634434789)
    -df <- data.frame(tl=round(rnorm(100,100,20)))
    -df$lcat10 <- lencat(df$tl,w=10)
    -
    -## Summarize as tables
    -( tbl1 <- xtabs(~lcat10,data=df) )
    +    
    +

    Examples

    +
    ## Make some dummy data with a length category variable
    +set.seed(634434789)
    +df <- data.frame(tl=round(rnorm(100,100,20)))
    +df$lcat10 <- lencat(df$tl,w=10)
    +
    +## Summarize as tables
    +( tbl1 <- xtabs(~lcat10,data=df) )
     #> lcat10
     #>  20  50  60  70  80  90 100 110 120 130 140 150 
     #>   1   1   5   7  10  24  19  19   7   3   3   1 
    -( tbl2 <- table(df$lcat10) )
    +( tbl2 <- table(df$lcat10) )
     #> 
     #>  20  50  60  70  80  90 100 110 120 130 140 150 
     #>   1   1   5   7  10  24  19  19   7   3   3   1 
    -
    -## Turn the tables into a data.frame for testing (convert
    -## the categories variables to numeric with fact2num())
    -df2 <- data.frame(tbl1)
    -df2$lcat10 <- fact2num(df2$lcat10)
    -
    -## Turn the table into a matrix for testing
    -( mat1 <- cbind(lcat10=as.numeric(rownames(tbl1)),freq=tbl1) )
    +
    +## Turn the tables into a data.frame for testing (convert
    +## the categories variables to numeric with fact2num())
    +df2 <- data.frame(tbl1)
    +df2$lcat10 <- fact2num(df2$lcat10)
    +
    +## Turn the table into a matrix for testing
    +( mat1 <- cbind(lcat10=as.numeric(rownames(tbl1)),freq=tbl1) )
     #>     lcat10 freq
     #> 20      20    1
     #> 50      50    1
    @@ -136,53 +146,49 @@ 

    Examples

    #> 130 130 3 #> 140 140 3 #> 150 150 1 - -## Histogram of the raw data ... set breaks and x-axis label -brks <- seq(20,160,10) -xlbl <- "Total Length (mm)" -hist(~tl,data=df,breaks=brks,xlab=xlbl) + +## Histogram of the raw data ... set breaks and x-axis label +brks <- seq(20,160,10) +xlbl <- "Total Length (mm)" +hist(~tl,data=df,breaks=brks,xlab=xlbl) - -## Use this function with various inputs ... changed colors -## on each plot so that it was obvious that a new plot was made. -# table from xtabs() -histFromSum(tbl1,breaks=brks,xlab=xlbl,col="gray75") + +## Use this function with various inputs ... changed colors +## on each plot so that it was obvious that a new plot was made. +# table from xtabs() +histFromSum(tbl1,breaks=brks,xlab=xlbl,col="gray75") -# table from table() -histFromSum(tbl2,breaks=brks,xlab=xlbl,col="gray70") +# table from table() +histFromSum(tbl2,breaks=brks,xlab=xlbl,col="gray70") -# vectors from data.frame -histFromSum(df2$lcat10,df2$Freq,breaks=brks,xlab=xlbl,col="gray65") +# vectors from data.frame +histFromSum(df2$lcat10,df2$Freq,breaks=brks,xlab=xlbl,col="gray65") -# vectors from matrix -histFromSum(mat1[,"lcat10"],mat1[,"freq"],breaks=brks,xlab=xlbl,col="gray60") +# vectors from matrix +histFromSum(mat1[,"lcat10"],mat1[,"freq"],breaks=brks,xlab=xlbl,col="gray60") -# formula from a data.frame -histFromSum(Freq~lcat10,data=df2,breaks=brks,xlab=xlbl,col="gray55") +# formula from a data.frame +histFromSum(Freq~lcat10,data=df2,breaks=brks,xlab=xlbl,col="gray55") - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/hyperCI.html b/docs/reference/hyperCI.html index a42c280c..1c4e5198 100644 --- a/docs/reference/hyperCI.html +++ b/docs/reference/hyperCI.html @@ -1,123 +1,129 @@ -Confidence interval for population size (N) in hypergeometric distribution. — hyperCI • FSAConfidence interval for population size (N) in hypergeometric distribution. — hyperCI • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes a confidence interval for population size (N) in hypergeometric distribution.

    -
    -
    hyperCI(M, n, m, conf.level = 0.95)
    +
    +

    Usage

    +
    hyperCI(M, n, m, conf.level = 0.95)
    -
    -

    Arguments

    +
    +

    Arguments

    M

    Number of successes in the population.

    + +
    n

    Number of observations in the sample.

    + +
    m

    Number of observed successes in the sample.

    + +
    conf.level

    Level of confidence to use for constructing confidence intervals (default is 0.95).

    +
    -
    -

    Value

    -

    A 1x2 matrix that contains the lower and upper confidence interval bounds.

    +
    +

    Value

    + + +

    A 1x2 matrix that contains the lower and upper confidence interval bounds.

    -
    -

    Details

    +
    +

    Details

    This is an inefficient brute-force algorithm. The algorithm computes the conf.level range of possible values for m, as if it was unknown, for a large range of values of N. It then finds all possible values of N for which m was in the conf.level range. The smallest and largest values of N for which m was in the conf.level range are the CI endpoints.

    -
    -

    Note

    +
    +

    Note

    This algorithm is experimental at this point.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    hyperCI(50,25,10)
    +    
    +

    Examples

    +
    hyperCI(50,25,10)
     #>      95% LCI 95% UCI
     #> [1,]      86     228
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/index.html b/docs/reference/index.html index 4660db6e..de54e39b 100644 --- a/docs/reference/index.html +++ b/docs/reference/index.html @@ -1,474 +1,653 @@ -Function reference • FSAFunction reference • FSA + + Skip to contents -
    -
    +
    +
    +
    +
    +

    Age Comparisons

    +

    Summarize and visualize bias and precision among multiple estimates of age for individual fish.

    -
    -
    - + +
    + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    -

    Age Comparisons

    -

    Summarize and visualize bias and precision among multiple estimates of age for individual fish.

    -
    -

    ageBias() summary(<ageBias>) plot(<ageBias>)

    -

    Compute and view possible differences between paired sets of ages.

    -

    agePrecision() summary(<agePrec>)

    -

    Compute measures of precision among sets of ages.

    -

    plotAB()

    -

    Construct traditional (Campana-like) age-bias plots.

    -

    Age-Length Keys

    -

    Process age-length keys including assigning ages to indvidual fish and visualizing a key.

    -
    -

    alkIndivAge()

    -

    Use an age-length key to assign age to individuals in the unaged sample.

    -

    alkPlot()

    -

    Plots to visualize age-length keys.

    -

    alkAgeDist()

    -

    Proportions-at-age from an age-length key

    -

    alkMeanVar()

    -

    Mean Values-at-age from an age-length key

    -

    Size Structure

    -

    Efficiently calculate and summarize proportional size distribution (PSD) data.

    -
    -

    PSDlit

    -

    Gabelhouse five-cell length categories for various species.

    -

    psdVal()

    -

    Finds Gabelhouse lengths (for PSD calculations) for a species.

    -

    psdPlot()

    -

    Length-frequency histogram with Gabelhouse lengths highlighted.

    -

    psdCalc()

    -

    Convenience function for calculating PSD-X and PSD X-Y values.

    -

    psdCI()

    -

    Compute confidence intervals for PSD-X and PSD X-Y values.

    -

    psdAdd()

    -

    Creates a vector of Gabelhouse lengths for each species in an entire data frame.

    -

    tictactoe()

    -

    Construct a base tic-tac-toe plot for presenting predator-prey PSD values.

    -

    Condition and Weight-Length

    -

    Efficiently calculate and summarize relative weight and visualize weight-length data among groups with different slopes.

    -
    -

    WSlit

    -

    All known standard weight equations.

    -

    wsVal()

    -

    Finds standard weight equation coefficients for a particular species.

    -

    wrAdd()

    -

    Computes a vector of relative weights specific to a species in an entire data frame.

    -

    lwCompPreds()

    -

    Constructs plots of predicted weights at given lengths among different groups.

    -

    Abundance (Capture-Recapture)

    -

    Estimate abundance with capture-recapture data, including handling and summarizing data in a capture history format.

    -
    -

    mrClosed() summary(<mrClosed1>) confint(<mrClosed1>) summary(<mrClosed2>) confint(<mrClosed2>) plot(<mrClosed2>)

    -

    Estimate initial population size for single or multiple census mark-recapture data.

    -

    jolly() mrOpen() summary(<mrOpen>) confint(<mrOpen>)

    -

    Jolly-Seber analysis from multiple mark-recapture events from an open population.

    -

    capHistConvert()

    -

    Convert between capture history data.frame formats.

    -

    capHistSum() is.CapHist() plot(<CapHist>)

    -

    Summarize capture histories in individual fish format.

    -

    Abundance (Depletion)

    -

    Estimate abundance with depletion and removal data.

    -
    -

    depletion() summary(<depletion>) coef(<depletion>) confint(<depletion>) anova(<depletion>) rSquared(<depletion>) plot(<depletion>)

    -

    Computes the Leslie or DeLury population estimate from catch and effort data.

    -

    removal() summary(<removal>) confint(<removal>)

    -

    Population estimates for k-, 3-, or 2-pass removal data.

    -

    Individual Growth

    -

    Fit various growth models (von Bertalanffy, Gompertz, logistic, Richards, Schnute) to individual fish.

    -
    -

    vbFuns() GompertzFuns() RichardsFuns() logisticFuns() growthFunShow()

    -

    Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions.

    -

    vbStarts()

    -

    Find reasonable starting values for a von Bertalanffy growth function.

    -

    Schnute()

    -

    The four-parameter growth function from Schnute (1981).

    -

    SchnuteRichards()

    -

    The five-parameter growth function from Schnute and Richards (1990).

    -

    Mortality Rates

    -

    Estimate mortality rates from catch curve data and empirical models.

    -
    -

    catchCurve() summary(<catchCurve>) coef(<catchCurve>) anova(<catchCurve>) confint(<catchCurve>) rSquared(<catchCurve>) plot(<catchCurve>)

    -

    Mortality estimates from the descending limb of a catch curve.

    -

    chapmanRobson() summary(<chapmanRobson>) coef(<chapmanRobson>) confint(<chapmanRobson>) plot(<chapmanRobson>)

    -

    Computes Chapman-Robson estimates of S and Z.

    -

    Mmethods() metaM() print(<metaM>)

    -

    Estimate natural mortality from a variety of empirical methods.

    -

    Stock-Recruiment

    -

    Fit various (Beverton-Holt, Ricker, Shepherd) stock-recruitment functions.

    -
    -

    srFuns() srFunShow()

    -

    Creates a function for a specific parameterization of a common stock-recruitment function .

    -

    srStarts()

    -

    Finds reasonable starting values for parameters in specific parameterizations of common stock-recruitment models.

    -

    Utilities (Fisheries-Specific)

    -

    Utilities that are likely useful only to fisheries scientists.

    -
    -

    addZeroCatch()

    -

    Adds zeros for catches of species not collected in some sampling events.

    -

    expandCounts()

    -

    Repeat individual fish data (including lengths) from tallied counts.

    -

    expandLenFreq()

    -

    Expands a length frequency based on a subsample.

    -

    lencat()

    -

    Constructs length class/category variable.

    -

    Utilities (General Statistics)

    -

    Utilities of a statistical nature that are likely to be useful to a fisheries scientist.

    -
    -

    binCI()

    -

    Confidence intervals for binomial probability of success.

    -

    hyperCI()

    -

    Confidence interval for population size (N) in hypergeometric distribution.

    -

    poiCI()

    -

    Confidence interval for Poisson counts.

    -

    confint(<boot>) htest(<boot>) predict(<boot>) hist(<boot>)

    -

    Associated S3 methods for bootstrap results from car::Boot.

    -

    dunnTest() print(<dunnTest>)

    -

    Dunn's Kruskal-Wallis Multiple Comparisons.

    -

    lrt() extraSS() print(<extraTest>)

    -

    Likelihood ratio and extra sum-of-squares tests.

    -

    geomean() geosd()

    -

    Calculates the geometric mean or geometric standard deviation.

    -

    hist(<formula>)

    -

    Creates separate histograms by levels.

    -

    histFromSum()

    -

    Create a histogram from a frequency table.

    -

    confint(<nlsBoot>) predict(<nlsBoot>) htest()

    -

    Associated S3 methods for nlsBoot from nlstools.

    -

    ksTest()

    -

    Kolmogorov-Smirnov Tests.

    -

    logbtcf()

    -

    Constructs the correction-factor used when back-transforming log-transformed values.

    -

    nlsTracePlot()

    -

    Adds model fits from nls iterations to active plot.

    -

    rcumsum() pcumsum()

    -

    Computes the prior to or reverse cumulative sum of a vector.

    -

    rSquared()

    -

    Extract the coefficient of determination from a linear model object.

    -

    se()

    -

    Computes standard error of the mean.

    -

    Summarize()

    -

    Summary statistics for a numeric variable.

    -

    sumTable()

    -

    Creates a one- or two-way table of summary statistics.

    -

    Utilities (Other)

    -

    Miscellaneous utilities.

    -
    -

    FSA

    -

    Fisheries stock assessment methods and data.

    -

    capFirst()

    -

    Capitalizes the first letter of first or all words in a string.

    -

    col2rgbt()

    -

    Converts an R color to RGB (red/green/blue) including a transparency (alpha channel).

    -

    fact2num()

    -

    Converts "numeric" factor levels to numeric values.

    -

    fishR()

    -

    Opens web pages associated with the fishR website.

    -

    headtail()

    -

    Shows rows from the head and tail of a data frame or matrix.

    -

    is.odd() is.even()

    -

    Determine if a number is odd or even.

    -

    kCounts() kPvalue() purl2() reproInfo()

    -

    Specific utilities for use in a knitr document.

    -

    lagratio()

    -

    Ratio of lagged observations.

    -

    perc()

    -

    Computes the percentage of values in a vector less than or greater than (and equal to) some value.

    -

    peek()

    -

    Peek into (show a subset of) a data frame or matrix.

    -

    repeatedRows2Keep()

    -

    Find non-repeated consecutive rows in a data.frame.

    -

    validn()

    -

    Finds the number of valid (non-NA) values in a vector.

    -

    Data

    -

    Data files.

    -
    -

    BluegillJL

    -

    Capture histories (2 samples) of Bluegill from Jewett Lake, MI.

    -

    BrookTroutTH

    -

    Catch-at-age for Tobin Harbor, Isle Royale Brook Trout.

    -

    ChinookArg

    -

    Lengths and weights for Chinook Salmon from three locations in Argentina.

    -

    CodNorwegian

    -

    Stock and recruitment data for Norwegian cod, 1937-1960.

    -

    CutthroatAL

    -

    Capture histories (9 samples) of Cutthroat Trout from Auke Lake.

    -

    Ecoli

    -

    Population growth of Escherichia coli.

    -

    Mirex

    -

    Mirex concentration, weight, capture year, and species of Lake Ontario salmon.

    -

    PikeNY

    -

    Summarized multiple mark-recapture data for all Northern Pike from Buckhorn Marsh, NY.

    -

    PikeNYPartial1

    -

    Capture histories (4 samples), in capture history format, of a subset of Northern Pike from Buckhorn Marsh, NY.

    -

    PSDlit

    -

    Gabelhouse five-cell length categories for various species.

    -

    SMBassLS

    -

    Catch-effort data for Little Silver Lake (Ont) Smallmouth Bass.

    -

    SMBassWB

    -

    Growth increment data for West Bearskin Lake, MN, Smallmouth Bass.

    -

    SpotVA1

    -

    Age and length of spot.

    -

    WhitefishLC

    -

    Assigned ages from two readers on three structures for Lake Whitefish from Lake Champlain.

    -

    WR79

    -

    Ages and lengths for a hypothetical sample from Westerheim and Ricker (1979).

    -

    WSlit

    -

    All known standard weight equations.

    -

    Defunct

    -

    Defunct functions.

    -
    -

    bootCase() chooseColors() compIntercepts() compSlopes() diags() filterD() fitPlot() fsaNews() hoCoef() mapvalues() plotBinResp() residPlot() Subset()

    -

    DEFUNCT functions.

    - - -
    +
    + + ageBias() summary(<ageBias>) plot(<ageBias>) +
    +
    Compute and view possible differences between paired sets of ages.
    +
    + + agePrecision() summary(<agePrec>) +
    +
    Compute measures of precision among sets of ages.
    +
    + + plotAB() +
    +
    Construct traditional (Campana-like) age-bias plots.
    +
    +

    Age-Length Keys

    + +

    Process age-length keys including assigning ages to indvidual fish and visualizing a key.

    + + +
    + + + +
    + + alkIndivAge() +
    +
    Use an age-length key to assign age to individuals in the unaged sample.
    +
    + + alkPlot() +
    +
    Plots to visualize age-length keys.
    +
    + + alkAgeDist() +
    +
    Proportions-at-age from an age-length key
    +
    + + alkMeanVar() +
    +
    Mean Values-at-age from an age-length key
    +
    +

    Size Structure

    + +

    Efficiently calculate and summarize proportional size distribution (PSD) data.

    -
    + + + + +
    + + PSDlit +
    +
    Gabelhouse five-cell length categories for various species.
    +
    + + psdVal() +
    +
    Finds Gabelhouse lengths (for PSD calculations) for a species.
    +
    + + psdPlot() +
    +
    Length-frequency histogram with Gabelhouse lengths highlighted.
    +
    + + psdCalc() +
    +
    Convenience function for calculating PSD-X and PSD X-Y values.
    +
    + + psdCI() +
    +
    Compute confidence intervals for PSD-X and PSD X-Y values.
    +
    + + psdAdd() +
    +
    Creates a vector of Gabelhouse lengths for each species in an entire data frame.
    +
    + + tictactoe() +
    +
    Construct a base tic-tac-toe plot for presenting predator-prey PSD values.
    +
    +

    Condition and Weight-Length

    + +

    Efficiently calculate and summarize relative weight and visualize weight-length data among groups with different slopes.

    + + +
    + + + + +
    + + WSlit +
    +
    All known standard weight equations.
    +
    + + wsVal() +
    +
    Finds standard weight equation coefficients for a particular species.
    +
    + + wrAdd() +
    +
    Computes a vector of relative weights specific to a species in an entire data frame.
    +
    + + lwCompPreds() +
    +
    Constructs plots of predicted weights at given lengths among different groups.
    +
    +

    Abundance (Capture-Recapture)

    + +

    Estimate abundance with capture-recapture data, including handling and summarizing data in a capture history format.

    + + +
    + + + + +
    + + mrClosed() summary(<mrClosed1>) confint(<mrClosed1>) summary(<mrClosed2>) confint(<mrClosed2>) plot(<mrClosed2>) +
    +
    Estimate initial population size for single or multiple census mark-recapture data.
    +
    + + jolly() mrOpen() summary(<mrOpen>) confint(<mrOpen>) +
    +
    Jolly-Seber analysis from multiple mark-recapture events from an open population.
    +
    + + capHistConvert() +
    +
    Convert between capture history data.frame formats.
    +
    + + capHistSum() is.CapHist() plot(<CapHist>) +
    +
    Summarize capture histories in individual fish format.
    +
    +

    Abundance (Depletion)

    + +

    Estimate abundance with depletion and removal data.

    + + +
    + + + + +
    + + depletion() summary(<depletion>) coef(<depletion>) confint(<depletion>) anova(<depletion>) rSquared(<depletion>) plot(<depletion>) +
    +
    Computes the Leslie or DeLury population estimate from catch and effort data.
    +
    + + removal() summary(<removal>) confint(<removal>) +
    +
    Population estimates for k-, 3-, or 2-pass removal data.
    +
    +

    Individual Growth

    + +

    Fit various growth models (von Bertalanffy, Gompertz, logistic, Richards, Schnute) to individual fish.

    + + +
    + + + + +
    + + vbFuns() GompertzFuns() RichardsFuns() logisticFuns() growthFunShow() +
    +
    Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions.
    +
    + + vbStarts() +
    +
    Find reasonable starting values for a von Bertalanffy growth function.
    +
    + + Schnute() +
    +
    The four-parameter growth function from Schnute (1981).
    +
    + + SchnuteRichards() +
    +
    The five-parameter growth function from Schnute and Richards (1990).
    +
    +

    Mortality Rates

    + +

    Estimate mortality rates from catch curve data and empirical models.

    + + +
    + + + + +
    + + catchCurve() summary(<catchCurve>) coef(<catchCurve>) anova(<catchCurve>) confint(<catchCurve>) rSquared(<catchCurve>) plot(<catchCurve>) +
    +
    Mortality estimates from the descending limb of a catch curve.
    +
    + + chapmanRobson() summary(<chapmanRobson>) coef(<chapmanRobson>) confint(<chapmanRobson>) plot(<chapmanRobson>) +
    +
    Computes Chapman-Robson estimates of S and Z.
    +
    + + Mmethods() metaM() print(<metaM>) +
    +
    Estimate natural mortality from a variety of empirical methods.
    +
    +

    Stock-Recruiment

    + +

    Fit various (Beverton-Holt, Ricker, Shepherd) stock-recruitment functions.

    + + +
    + + + + +
    + + srFuns() srFunShow() +
    +
    Creates a function for a specific parameterization of a common stock-recruitment function .
    +
    + + srStarts() +
    +
    Finds reasonable starting values for parameters in specific parameterizations of common stock-recruitment models.
    +
    +

    Utilities (Fisheries-Specific)

    + +

    Utilities that are likely useful only to fisheries scientists.

    + + +
    + + + + +
    + + addZeroCatch() +
    +
    Adds zeros for catches of species not collected in some sampling events.
    +
    + + expandCounts() +
    +
    Repeat individual fish data (including lengths) from tallied counts.
    +
    + + expandLenFreq() +
    +
    Expands a length frequency based on a subsample.
    +
    + + lencat() +
    +
    Constructs length class/category variable.
    +
    +

    Utilities (General Statistics)

    + +

    Utilities of a statistical nature that are likely to be useful to a fisheries scientist.

    + + +
    + + + + +
    + + binCI() +
    +
    Confidence intervals for binomial probability of success.
    +
    + + hyperCI() +
    +
    Confidence interval for population size (N) in hypergeometric distribution.
    +
    + + poiCI() +
    +
    Confidence interval for Poisson counts.
    +
    + + confint(<boot>) htest(<boot>) predict(<boot>) hist(<boot>) +
    +
    Associated S3 methods for bootstrap results from car::Boot.
    +
    + + dunnTest() print(<dunnTest>) +
    +
    Dunn's Kruskal-Wallis Multiple Comparisons.
    +
    + + lrt() extraSS() print(<extraTest>) +
    +
    Likelihood ratio and extra sum-of-squares tests.
    +
    + + geomean() geosd() +
    +
    Calculates the geometric mean or geometric standard deviation.
    +
    + + hist(<formula>) +
    +
    Creates separate histograms by levels.
    +
    + + histFromSum() +
    +
    Create a histogram from a frequency table.
    +
    + + confint(<nlsBoot>) predict(<nlsBoot>) htest() +
    +
    Associated S3 methods for nlsBoot from nlstools.
    +
    + + ksTest() +
    +
    Kolmogorov-Smirnov Tests.
    +
    + + logbtcf() +
    +
    Constructs the correction-factor used when back-transforming log-transformed values.
    +
    + + nlsTracePlot() +
    +
    Adds model fits from nls iterations to active plot.
    +
    + + rcumsum() pcumsum() +
    +
    Computes the prior to or reverse cumulative sum of a vector.
    +
    + + rSquared() +
    +
    Extract the coefficient of determination from a linear model object.
    +
    + + se() +
    +
    Computes standard error of the mean.
    +
    + + Summarize() +
    +
    Summary statistics for a numeric variable.
    +
    + + sumTable() +
    +
    Creates a one- or two-way table of summary statistics.
    +
    +

    Utilities (Other)

    + +

    Miscellaneous utilities.

    + + +
    + + + + +
    + + FSA +
    +
    Fisheries stock assessment methods and data.
    +
    + + capFirst() +
    +
    Capitalizes the first letter of first or all words in a string.
    +
    + + col2rgbt() +
    +
    Converts an R color to RGB (red/green/blue) including a transparency (alpha channel).
    +
    + + fact2num() +
    +
    Converts "numeric" factor levels to numeric values.
    +
    + + fishR() +
    +
    Opens web pages associated with the fishR website.
    +
    + + headtail() +
    +
    Shows rows from the head and tail of a data frame or matrix.
    +
    + + is.odd() is.even() +
    +
    Determine if a number is odd or even.
    +
    + + kCounts() kPvalue() purl2() reproInfo() +
    +
    Specific utilities for use in a knitr document.
    +
    + + lagratio() +
    +
    Ratio of lagged observations.
    +
    + + perc() +
    +
    Computes the percentage of values in a vector less than or greater than (and equal to) some value.
    +
    + + peek() +
    +
    Peek into (show a subset of) a data frame or matrix.
    +
    + + repeatedRows2Keep() +
    +
    Find non-repeated consecutive rows in a data.frame.
    +
    + + validn() +
    +
    Finds the number of valid (non-NA) values in a vector.
    +
    +

    Data

    + +

    Data files.

    + + +
    + + + + +
    + + BluegillJL +
    +
    Capture histories (2 samples) of Bluegill from Jewett Lake, MI.
    +
    + + BrookTroutTH +
    +
    Catch-at-age for Tobin Harbor, Isle Royale Brook Trout.
    +
    + + ChinookArg +
    +
    Lengths and weights for Chinook Salmon from three locations in Argentina.
    +
    + + CodNorwegian +
    +
    Stock and recruitment data for Norwegian cod, 1937-1960.
    +
    + + CutthroatAL +
    +
    Capture histories (9 samples) of Cutthroat Trout from Auke Lake.
    +
    + + Ecoli +
    +
    Population growth of Escherichia coli.
    +
    + + Mirex +
    +
    Mirex concentration, weight, capture year, and species of Lake Ontario salmon.
    +
    + + PikeNY +
    +
    Summarized multiple mark-recapture data for all Northern Pike from Buckhorn Marsh, NY.
    +
    + + PikeNYPartial1 +
    +
    Capture histories (4 samples), in capture history format, of a subset of Northern Pike from Buckhorn Marsh, NY.
    +
    + + PSDlit +
    +
    Gabelhouse five-cell length categories for various species.
    +
    + + SMBassLS +
    +
    Catch-effort data for Little Silver Lake (Ont) Smallmouth Bass.
    +
    + + SMBassWB +
    +
    Growth increment data for West Bearskin Lake, MN, Smallmouth Bass.
    +
    + + SpotVA1 +
    +
    Age and length of spot.
    +
    + + WhitefishLC +
    +
    Assigned ages from two readers on three structures for Lake Whitefish from Lake Champlain.
    +
    + + WR79 +
    +
    Ages and lengths for a hypothetical sample from Westerheim and Ricker (1979).
    +
    + + WSlit +
    +
    All known standard weight equations.
    +
    +

    Defunct

    + +

    Defunct functions.

    + + +
    +
    + + +
    -
    -

    Site built with pkgdown 2.0.2.

    + -
    +
    - diff --git a/docs/reference/knitUtil.html b/docs/reference/knitUtil.html index 4d0f25f0..1e519595 100644 --- a/docs/reference/knitUtil.html +++ b/docs/reference/knitUtil.html @@ -1,216 +1,260 @@ -Specific utilities for use in a knitr document. — kCounts • FSASpecific utilities for use in a knitr document. — kCounts • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Specific utilities for pretty printing various items in a knitr document.

    -
    -
    kCounts(value, capitalize = FALSE)
    -
    -kPvalue(value, digits = 4, include.p = TRUE, latex = TRUE)
    -
    -purl2(
    -  file,
    -  out.dir = NULL,
    -  newname = NULL,
    -  topnotes = NULL,
    -  moreItems = NULL,
    -  blanks = c("extra", "all", "none"),
    -  delHeader = NULL,
    -  timestamp = TRUE,
    -  ...
    -)
    -
    -reproInfo(
    -  out = c("r", "markdown", "latex"),
    -  rqrdPkgs = NULL,
    -  elapsed = NULL,
    -  width = 0.95 * getOption("width"),
    -  addTOC = TRUE,
    -  newpage = FALSE,
    -  links = NULL,
    -  closeGraphics = TRUE,
    -  ind = 1
    -)
    +
    +

    Usage

    +
    kCounts(value, capitalize = FALSE)
    +
    +kPvalue(value, digits = 4, include.p = TRUE, latex = TRUE)
    +
    +purl2(
    +  file,
    +  out.dir = NULL,
    +  newname = NULL,
    +  topnotes = NULL,
    +  moreItems = NULL,
    +  blanks = c("extra", "all", "none"),
    +  delHeader = NULL,
    +  timestamp = TRUE,
    +  ...
    +)
    +
    +reproInfo(
    +  out = c("r", "markdown", "latex"),
    +  rqrdPkgs = NULL,
    +  elapsed = NULL,
    +  width = 0.95 * getOption("width"),
    +  addTOC = TRUE,
    +  newpage = FALSE,
    +  links = NULL,
    +  closeGraphics = TRUE,
    +  ind = 1
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    value

    A single numeric count or p-value.

    + +
    capitalize

    A logical that indicates if the returned words should be capitalized or not (the default).

    + +
    digits

    Number of decimal places to round the values to.

    + +
    include.p

    A logical that indicates whether the result should be a character string with “p=” appended to the numerical result.

    + +
    latex

    A logical that indicates whether the resultant p-value string should be contained within dollar signs to form a latex formula.

    + +
    file

    A string that contains the root name of the .RNW file. This will also be the name of the resultant purled file with .R appended.

    + +
    out.dir

    A string that indicates the directory structure in which the purled file should be located. This should not have a forward slash at the end.

    + +
    newname

    A string for the output filename (without the extension) from purl2.

    + +
    topnotes

    A character vector of lines to be added to the top of the output file. Each value in the vector will be placed on a single line at the top of the output file.

    + +
    moreItems

    A string that contains additional words that when found in the purled file will result in the entire line with those words to be deleted.

    + +
    blanks

    A string that indicates if blank lines should be removed. If blanks="all" then all blank lines will be removed. If blanks="extra" then only “extra” blanks lines will be removed (i.e., one blank line will be left where there was originally more than one blank line).

    + +
    delHeader

    A single character that denotes the top and bottom of a block of lines that should be deleted from the script created by purl2.

    + +
    timestamp

    A logical that indicates whether a timestamp comment should be appended to the bottom of the script created by purl2.

    + +
    ...

    Additional arguments for the original purl.

    + +
    out

    A string that indicates the type of output from reproInfo -- Markdown, LaTeX, or simple R code.

    + +
    rqrdPkgs

    A string vector that contains packages that are required for the vignette and for which all dependencies should be found.

    + +
    elapsed

    A numeric, usually from proc.time, that is the time required to run the vignette. If NULL then this output will not be used. See the note below.

    + +
    width

    A numeric that indicates the width to use for wrapping the reproducibility information when out="r".

    + +
    addTOC

    A logical that indicates whether or not a table of contents entry for the reproducibility section should be added to the LaTeX output. Used only if Rout="latex"

    + +
    newpage

    A logical that indicates whether or not the reproducibility information should begin on a new page. Used only if Rout="latex"

    + +
    links

    A named character vector that will add a links bullet to the reproducibility information. The names will be shown and the values are the links. Used only if Rout="markdown.

    + +
    closeGraphics

    A logical that indicates whether the graphics device should be closed or not.

    + +
    ind

    An integer that indicates the CRAN mirror to use. Defaults to 1.

    +
    -
    -

    Value

    -
    • kCounts returns a numeric value if the count is less than zero or greater than ten and returns a character string of the number ‘name’. See the examples.

    • +
      +

      Value

      + + +
      • kCounts returns a numeric value if the count is less than zero or greater than ten and returns a character string of the number ‘name’. See the examples.

      • kPvalue returns a character string of the supplied p-value rounded to the requested number of digits or a character string that indicates what the p-value is less than the value with a ‘5’ in the digits+1 place. See the examples.

      • purl2 is a modification of purl from knitr that creates a file with the same name as file but with lines removed that contain certain words (those found in ItemsToRemove and moreItems).

      • reproInfo returns Markdown, LaTeX, or R code that prints “reproducibility information” at the bottom of the knitted document.

      -
      -

      Details

      +
      +

      Details

      • kCounts is used to convert numeric numbers to ‘word’ numbers in a sentence.

      • kPvalue is used to print ‘pretty’ p-values.

      • purl2 is used to create a modified (see below) Stangled or purled script.

      • reproInfo is used to print ‘reproducibility information’ for the document.

      -
      -

      Note

      +
      +

      Note

      In reproInfo, elapsed can be used to print the time it took to process the document by sending the elapsed time for processing to this argument. The simplest way to get an approximate elapsed time is to put st <- proc.time() very early (first line?) in your knitr code, put et <- proc.time()-st very late in your knitr code (i.e., just prior to reproInfo), and then used elapsed=et["user.self"]+et["sys.self"] in reproInfo.

      -
      -

      See also

      +
      +

      See also

      See formatC for functionality similar to kPvalue. See purl and knit in knitr for functionality similar to purl2.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      kCounts(7)
      +    
      +

      Examples

      +
      kCounts(7)
       #> [1] "seven"
      -kCounts(17)
      +kCounts(17)
       #> [1] 17
      -kCounts(0)
      +kCounts(0)
       #> [1] "zero"
      -kCounts(-6)
      +kCounts(-6)
       #> [1] -6
      -kCounts(3,capitalize=TRUE)
      +kCounts(3,capitalize=TRUE)
       #> [1] "Three"
      -
      -kPvalue(0.123456789)
      +
      +kPvalue(0.123456789)
       #> [1] "$p=0.1235$"
      -kPvalue(0.000123456)
      +kPvalue(0.000123456)
       #> [1] "$p=0.0001$"
      -kPvalue(0.000012345)
      +kPvalue(0.000012345)
       #> [1] "$p<0.00005$"
      -kPvalue(0.000012345,include.p=FALSE)
      +kPvalue(0.000012345,include.p=FALSE)
       #> [1] "$<0.00005$"
      -kPvalue(0.000012345,include.p=FALSE,latex=FALSE)
      +kPvalue(0.000012345,include.p=FALSE,latex=FALSE)
       #> [1] "<0.00005"
      -
      +
       
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/ksTest.html b/docs/reference/ksTest.html index 905f9d26..7b0d0c9f 100644 --- a/docs/reference/ksTest.html +++ b/docs/reference/ksTest.html @@ -1,194 +1,204 @@ -Kolmogorov-Smirnov Tests. — ksTest • FSAKolmogorov-Smirnov Tests. — ksTest • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Performs a one- or two-sample Kolmogorov-Smirnov test. Includes the option to perform the two-sample test using the formula notation.

    -
    -
    ksTest(x, ...)
    -
    -# S3 method for default
    -ksTest(
    -  x,
    -  y,
    -  ...,
    -  alternative = c("two.sided", "less", "greater"),
    -  exact = NULL
    -)
    -
    -# S3 method for formula
    -ksTest(
    -  x,
    -  data = NULL,
    -  ...,
    -  alternative = c("two.sided", "less", "greater"),
    -  exact = NULL
    -)
    +
    +

    Usage

    +
    ksTest(x, ...)
    +
    +# S3 method for default
    +ksTest(
    +  x,
    +  y,
    +  ...,
    +  alternative = c("two.sided", "less", "greater"),
    +  exact = NULL
    +)
    +
    +# S3 method for formula
    +ksTest(
    +  x,
    +  data = NULL,
    +  ...,
    +  alternative = c("two.sided", "less", "greater"),
    +  exact = NULL
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector of data values or a formula (see details).

    + +
    ...

    Parameters of the distribution specified (as a character string) by y.

    + +
    y

    A numeric vector of data values, a character string naming a cumulative distribution function, or an actual cumulative distribution function. See ks.test.

    + +
    alternative

    A string that indicates the alternative hypothesis. See ks.test.

    + +
    exact

    NULL or a logical that indicates whether an exact p-value should be computed. See ks.test. Not available if ties are present, nor for the one-sided two-sample case.

    + +
    data

    A data frame that contains the variables in the formula for x.

    +
    -
    -

    Value

    -

    See ks.test.

    +
    +

    Value

    + + +

    See ks.test.

    -
    -

    Details

    +
    +

    Details

    This is exactly ks.test except that a formula may be used for the two-sample situation. The default version is simply a pass through to ks.test. See ks.test for more details.

    -
    -

    See also

    +
    +

    See also

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## see ks.test for other examples
    -x <- rnorm(50)
    -y <- runif(30)
    -df <- data.frame(dat=c(x,y),
    -                 grp=factor(rep(c("x","y"),c(50,30))),
    -                 stringsAsFactors=FALSE)
    -
    -## one-sample (from ks.test) still works
    -ksTest(x+2, "pgamma", 3, 2)
    +    
    +

    Examples

    +
    ## see ks.test for other examples
    +x <- rnorm(50)
    +y <- runif(30)
    +df <- data.frame(dat=c(x,y),
    +                 grp=factor(rep(c("x","y"),c(50,30))),
    +                 stringsAsFactors=FALSE)
    +
    +## one-sample (from ks.test) still works
    +ksTest(x+2, "pgamma", 3, 2)
     #> 
    -#> 	One-sample Kolmogorov-Smirnov test
    +#> 	Exact one-sample Kolmogorov-Smirnov test
     #> 
     #> data:  x
     #> D = 0.21652, p-value = 0.01546
     #> alternative hypothesis: two-sided
     #> 
    -ks.test(x+2, "pgamma", 3, 2)
    +ks.test(x+2, "pgamma", 3, 2)
     #> 
    -#> 	One-sample Kolmogorov-Smirnov test
    +#> 	Exact one-sample Kolmogorov-Smirnov test
     #> 
     #> data:  x + 2
     #> D = 0.21652, p-value = 0.01546
     #> alternative hypothesis: two-sided
     #> 
    -
    -## first two-sample example in ?ks.test
    -ksTest(x,y)
    +
    +## first two-sample example in ?ks.test
    +ksTest(x,y)
     #> 
    -#> 	Two-sample Kolmogorov-Smirnov test
    +#> 	Exact two-sample Kolmogorov-Smirnov test
     #> 
     #> data:  x and y
     #> D = 0.6, p-value = 8.598e-07
     #> alternative hypothesis: two-sided
     #> 
    -ks.test(x,y)
    +ks.test(x,y)
     #> 
    -#> 	Two-sample Kolmogorov-Smirnov test
    +#> 	Exact two-sample Kolmogorov-Smirnov test
     #> 
     #> data:  x and y
     #> D = 0.6, p-value = 8.598e-07
     #> alternative hypothesis: two-sided
     #> 
    -
    -## same as above but using data.frame and formula
    -ksTest(dat~grp,data=df)
    +
    +## same as above but using data.frame and formula
    +ksTest(dat~grp,data=df)
     #> 
    -#> 	Two-sample Kolmogorov-Smirnov test
    +#> 	Exact two-sample Kolmogorov-Smirnov test
     #> 
     #> data:  x and y
     #> D = 0.6, p-value = 8.598e-07
     #> alternative hypothesis: two-sided
     #> 
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/lagratio.html b/docs/reference/lagratio.html index 4b952fd7..9e2e0a67 100644 --- a/docs/reference/lagratio.html +++ b/docs/reference/lagratio.html @@ -1,158 +1,168 @@ -Ratio of lagged observations. — lagratio • FSARatio of lagged observations. — lagratio • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the ratio of lagged observations in a vector.

    -
    -
    lagratio(
    -  x,
    -  lag = 1L,
    -  recursion = 1L,
    -  differences = recursion,
    -  direction = c("backward", "forward"),
    -  ...
    -)
    +
    +

    Usage

    +
    lagratio(
    +  x,
    +  lag = 1L,
    +  recursion = 1L,
    +  differences = recursion,
    +  direction = c("backward", "forward"),
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector or matrix.

    + +
    lag

    An integer representing the lag ‘distance’.

    + +
    recursion

    An integer that indicates the level of recursion for the calculations. A 1 will simply compute the ratios. A 2, for example, will compute the ratios, save the result, and then compute the ratios of the results using the same lag. See examples.

    + +
    differences

    Same as recursion. Used for symmetry with diff.

    + +
    direction

    A string that indicates the direction of calculation. A "backward" indicates that ‘latter’ values are divided by ‘former’ values. A "forward" indicates that ‘former’ values are divided by ‘latter’ values. See examples.

    + +
    ...

    Additional arguments to diff().

    +
    -
    -

    Value

    -

    A vector or matrix of lagged ratios.

    +
    +

    Value

    + + +

    A vector or matrix of lagged ratios.

    -
    -

    Details

    +
    +

    Details

    This function behaves similarly to diff() except that it returns a vector or matrix of ratios rather than differences.

    -
    -

    See also

    +
    +

    See also

    diff

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Backward lagged ratios
    -# no recursion
    -lagratio(1:10,1)
    +    
    +

    Examples

    +
    ## Backward lagged ratios
    +# no recursion
    +lagratio(1:10,1)
     #> [1] 2.000000 1.500000 1.333333 1.250000 1.200000 1.166667 1.142857 1.125000
     #> [9] 1.111111
    -lagratio(1:10,2)
    +lagratio(1:10,2)
     #> [1] 3.000000 2.000000 1.666667 1.500000 1.400000 1.333333 1.285714 1.250000
    -# with recursion
    -lagratio(1:10,1,2)
    +# with recursion
    +lagratio(1:10,1,2)
     #> [1] 0.7500000 0.8888889 0.9375000 0.9600000 0.9722222 0.9795918 0.9843750
     #> [8] 0.9876543
    -lagratio(1:10,2,2)
    +lagratio(1:10,2,2)
     #> [1] 0.5555556 0.7500000 0.8400000 0.8888889 0.9183673 0.9375000
    -
    -## Forward lagged ratios
    -# no recursion
    -lagratio(10:1,1,direction="forward")
    +
    +## Forward lagged ratios
    +# no recursion
    +lagratio(10:1,1,direction="forward")
     #> [1] 1.111111 1.125000 1.142857 1.166667 1.200000 1.250000 1.333333 1.500000
     #> [9] 2.000000
    -lagratio(10:1,2,direction="forward")
    +lagratio(10:1,2,direction="forward")
     #> [1] 1.250000 1.285714 1.333333 1.400000 1.500000 1.666667 2.000000 3.000000
    -# with recursion
    -lagratio(10:1,1,2,direction="forward")
    +# with recursion
    +lagratio(10:1,1,2,direction="forward")
     #> [1] 0.9876543 0.9843750 0.9795918 0.9722222 0.9600000 0.9375000 0.8888889
     #> [8] 0.7500000
    -lagratio(10:1,2,2,direction="forward")
    +lagratio(10:1,2,2,direction="forward")
     #> [1] 0.9375000 0.9183673 0.8888889 0.8400000 0.7500000 0.5555556
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/lencat.html b/docs/reference/lencat.html index d27ab48f..b1362151 100644 --- a/docs/reference/lencat.html +++ b/docs/reference/lencat.html @@ -1,254 +1,278 @@ -Constructs length class/category variable. — lencat • FSAConstructs length class/category variable. — lencat • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Constructs a vector that contains the length class or category to which an individual belongs. Optionally, that vector can be appended to the original data frame.

    -
    -
    lencat(x, ...)
    -
    -# S3 method for default
    -lencat(
    -  x,
    -  w = 1,
    -  startcat = NULL,
    -  breaks = NULL,
    -  right = FALSE,
    -  use.names = FALSE,
    -  as.fact = use.names,
    -  droplevels = drop.levels,
    -  drop.levels = FALSE,
    -  ...
    -)
    -
    -# S3 method for formula
    -lencat(
    -  x,
    -  data,
    -  w = 1,
    -  startcat = NULL,
    -  breaks = NULL,
    -  right = FALSE,
    -  use.names = FALSE,
    -  as.fact = use.names,
    -  droplevels = drop.levels,
    -  drop.levels = FALSE,
    -  vname = NULL,
    -  ...
    -)
    +
    +

    Usage

    +
    lencat(x, ...)
    +
    +# S3 method for default
    +lencat(
    +  x,
    +  w = 1,
    +  startcat = NULL,
    +  breaks = NULL,
    +  right = FALSE,
    +  use.names = FALSE,
    +  as.fact = use.names,
    +  droplevels = drop.levels,
    +  drop.levels = FALSE,
    +  ...
    +)
    +
    +# S3 method for formula
    +lencat(
    +  x,
    +  data,
    +  w = 1,
    +  startcat = NULL,
    +  breaks = NULL,
    +  right = FALSE,
    +  use.names = FALSE,
    +  as.fact = use.names,
    +  droplevels = drop.levels,
    +  drop.levels = FALSE,
    +  vname = NULL,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector that contains the length measurements or a formula of the form ~x where “x” generically represents a variable in data that contains length measurements. This formula can only contain one variable.

    + +
    ...

    Not implemented.

    + +
    w

    A single numeric that indicates the width of length categories to create. Ignored if breaks is not NULL.

    + +
    startcat

    A single numeric that indicates the beginning of the first length category. Only used with w. See details for how this is handled when NULL.

    + +
    breaks

    A numeric vector of lower values for the break points of the length categories.

    + +
    right

    A logical that indicates if the intervals should be closed on the right (and open on the left) or vice versa.

    + +
    use.names

    A logical that indicates whether the names for the values in breaks should be used for the levels in the new variable. Will throw a warning and then use default levels if TRUE but names(breaks) is NULL.

    + +
    as.fact

    A logical that indicates that the new variable should be returned as a factor (=TRUE) or not (=FALSE; default).

    + +
    droplevels, drop.levels

    A logical that indicates that the new variable should retain all levels indicated in breaks (=FALSE; default) or not. Ignored if as.fact=FALSE.

    + +
    data

    A data.frame that minimally contains the length measurements given in the variable in the formula.

    + +
    vname

    A string that contains the name for the new length class variable.

    +
    -
    -

    Value

    -

    If the formula version of the function is used, then a data.frame is returned with the a new variable, named as in vname (defaults to LCat), appended to the original data.frame. If the default version of the function is used, then a single vector is returned. The returned values will be numeric unless breaks is named and use.names=TRUE or if as.fact=TRUE.

    +
    +

    Value

    + + +

    If the formula version of the function is used, then a data.frame is returned with the a new variable, named as in vname (defaults to LCat), appended to the original data.frame. If the default version of the function is used, then a single vector is returned. The returned values will be numeric unless breaks is named and use.names=TRUE or if as.fact=TRUE.

    -
    -

    Details

    +
    +

    Details

    If breaks is non-NULL, then w and startcat will be ignored. The vector of values in breaks should begin with a value smaller than the minimum observed value and end with a value larger than the maximum observed value. If the lowest break value is larger than the minimum observed value, then an error will occur. If the largest break value is smaller than the maximum observed value, then an additional break value larger than the maximum observed value will be added to breaks (and a warning will be sent). The values in breaks do not have to be equally spaced.

    If breaks=NULL (the default), then the value in w is used to create equally spaced categories. If startcat=NULL (the default), then the length categories will begin with the first value less than the minimum observed value “rounded” by w. For example, if the minimum observed value is 67, then the first length category will be 65 if w=5, 60 if w=10, 50 if w=25, and 50 if w=50. The length categories will continue from this starting value by values of w until a value greater than the largest observed value in x. The length categories are left-inclusive and right-exclusive by default (i.e., right=FALSE).

    The start of the length categories may also be set with startcat. The number in the startcat argument should be less than the smallest value in x. Additionally, the number of decimals in startcat should not be more than the number of decimals in w (e.g., startcat=0.4 and w=1 will result in an error).

    One may want to convert apparent numeric values to factor values if some of the length categories are missing (e.g., if factor values are used, for example, then tables of the length category values will have values for all length categories; i.e., it will have zeros for the length categories that are missing). The numeric values can be converted to factors by including as.fact. See the “real data” example.

    The observed values in x should be rounded to the appropriate number of decimals to avoid misplacement of individuals into incorrect length categories due to issues with machine-precision (see discussion in all.equal.)

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    2-Data Manipulation.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    # Create random lengths measured to nearest 0.1 unit
    -df1 <- data.frame(len=round(runif(50,0.1,9.9),1))
    -
    -# Create length categories by 0.1 unit
    -df1$LCat1 <- lencat(df1$len,w=0.1)
    -xtabs(~LCat1,data=df1)
    +    
    +

    Examples

    +
    # Create random lengths measured to nearest 0.1 unit
    +df1 <- data.frame(len=round(runif(50,0.1,9.9),1))
    +
    +# Create length categories by 0.1 unit
    +df1$LCat1 <- lencat(df1$len,w=0.1)
    +xtabs(~LCat1,data=df1)
     #> LCat1
     #> 0.2 0.9 1.3 1.5 1.6 1.7 2.1 2.8 2.9   3 3.3 3.8 4.1 4.2 4.4 4.5 4.6 4.9 5.1 5.7 
     #>   1   1   2   1   1   2   1   1   2   2   1   1   1   1   1   2   1   1   2   1 
     #> 6.2 6.3 6.4 7.1 7.2 7.3 7.6 7.7 7.9 8.3 8.6 8.7 9.3 9.8 
     #>   1   2   1   2   1   2   1   1   1   5   2   2   1   2 
    -
    -# length categories by 0.2 units
    -df1$LCat2 <- lencat(df1$len,w=0.2)
    -xtabs(~LCat2,data=df1)
    +
    +# length categories by 0.2 units
    +df1$LCat2 <- lencat(df1$len,w=0.2)
    +xtabs(~LCat2,data=df1)
     #> LCat2
     #> 0.2 0.8 1.2 1.4 1.6   2 2.8   3 3.2 3.8   4 4.2 4.4 4.6 4.8   5 5.6 6.2 6.4   7 
     #>   1   1   2   1   3   1   3   2   1   1   1   1   3   1   1   2   1   3   1   2 
     #> 7.2 7.6 7.8 8.2 8.6 9.2 9.8 
     #>   3   2   1   5   4   1   2 
    -
    -# length categories by 0.2 units starting at 0.1
    -df1$LCat3 <- lencat(df1$len,w=0.2,startcat=0.1)
    -xtabs(~LCat3,data=df1)
    +
    +# length categories by 0.2 units starting at 0.1
    +df1$LCat3 <- lencat(df1$len,w=0.2,startcat=0.1)
    +xtabs(~LCat3,data=df1)
     #> LCat3
     #> 0.1 0.9 1.3 1.5 1.7 2.1 2.7 2.9 3.3 3.7 4.1 4.3 4.5 4.9 5.1 5.7 6.1 6.3 7.1 7.3 
     #>   1   1   2   2   2   1   1   4   1   1   2   1   3   1   2   1   1   3   3   2 
     #> 7.5 7.7 7.9 8.3 8.5 8.7 9.3 9.7 
     #>   1   1   1   5   2   2   1   2 
    -
    -# length categories as set by breaks
    -df1$LCat4 <- lencat(df1$len,breaks=c(0,2,4,7,10))
    -xtabs(~LCat4,data=df1)
    +
    +# length categories as set by breaks
    +df1$LCat4 <- lencat(df1$len,breaks=c(0,2,4,7,10))
    +xtabs(~LCat4,data=df1)
     #> LCat4
     #>  0  2  4  7 
     #>  8  8 14 20 
    -
    -## A Second example
    -# random lengths measured to nearest unit
    -df2 <- data.frame(len=round(runif(50,10,117),0))    
    -
    -# length categories by 5 units
    -df2$LCat1 <- lencat(df2$len,w=5)
    -xtabs(~LCat1,data=df2)
    +
    +## A Second example
    +# random lengths measured to nearest unit
    +df2 <- data.frame(len=round(runif(50,10,117),0))    
    +
    +# length categories by 5 units
    +df2$LCat1 <- lencat(df2$len,w=5)
    +xtabs(~LCat1,data=df2)
     #> LCat1
     #>  10  15  20  25  30  35  40  45  50  65  70  75  80  85  90  95 100 105 110 115 
     #>   3   4   3   2   1   1   2   2   4   3   2   1   2   2   3   4   2   4   4   1 
    -
    -# length categories by 5 units starting at 7
    -df2$LCat2 <- lencat(df2$len,w=5,startcat=7)
    -xtabs(~LCat2,data=df2)
    +
    +# length categories by 5 units starting at 7
    +df2$LCat2 <- lencat(df2$len,w=5,startcat=7)
    +xtabs(~LCat2,data=df2)
     #> LCat2
     #>   7  12  17  22  27  32  37  42  47  52  62  67  72  77  82  87  92  97 102 107 
     #>   2   2   5   1   2   1   1   3   3   2   2   1   2   1   3   1   5   2   3   6 
     #> 112 
     #>   2 
    -
    -# length categories by 10 units
    -df2$LCat3 <- lencat(df2$len,w=10)
    -xtabs(~LCat3,data=df2)
    +
    +# length categories by 10 units
    +df2$LCat3 <- lencat(df2$len,w=10)
    +xtabs(~LCat3,data=df2)
     #> LCat3
     #>  10  20  30  40  50  60  70  80  90 100 110 
     #>   7   5   2   4   4   3   3   4   7   6   5 
    -
    -# length categories by 10 units starting at 5
    -df2$LCat4 <- lencat(df2$len,w=10,startcat=5)
    -xtabs(~LCat4,data=df2)
    +
    +# length categories by 10 units starting at 5
    +df2$LCat4 <- lencat(df2$len,w=10,startcat=5)
    +xtabs(~LCat4,data=df2)
     #> LCat4
     #>   5  15  25  35  45  65  75  85  95 105 115 
     #>   3   7   3   3   6   5   3   5   6   8   1 
    -
    -# length categories as set by breaks
    -df2$LCat5 <- lencat(df2$len,breaks=c(5,50,75,150))
    -xtabs(~LCat5,data=df2)
    +
    +# length categories as set by breaks
    +df2$LCat5 <- lencat(df2$len,breaks=c(5,50,75,150))
    +xtabs(~LCat5,data=df2)
     #> LCat5
     #>  5 50 75 
     #> 18  9 23 
    -
    -## A Third example
    -# random lengths measured to nearest 0.1 unit
    -df3 <- data.frame(len=round(runif(50,10,117),1))
    -
    -# length categories by 5 units
    -df3$LCat1 <- lencat(df3$len,w=5)
    -xtabs(~LCat1,data=df3)
    +
    +## A Third example
    +# random lengths measured to nearest 0.1 unit
    +df3 <- data.frame(len=round(runif(50,10,117),1))
    +
    +# length categories by 5 units
    +df3$LCat1 <- lencat(df3$len,w=5)
    +xtabs(~LCat1,data=df3)
     #> LCat1
     #>  10  15  20  25  30  35  40  45  50  55  60  65  70  75  80  85  90  95 100 105 
     #>   1   1   1   1   1   2   6   3   1   1   7   3   3   2   2   5   1   5   1   3 
    -
    -## A Fourth example
    -# random lengths measured to nearest 0.01 unit
    -df4 <- data.frame(len=round(runif(50,0.1,9.9),2))
    -
    -# length categories by 0.1 unit
    -df4$LCat1 <- lencat(df4$len,w=0.1)
    -xtabs(~LCat1,data=df4)
    +
    +## A Fourth example
    +# random lengths measured to nearest 0.01 unit
    +df4 <- data.frame(len=round(runif(50,0.1,9.9),2))
    +
    +# length categories by 0.1 unit
    +df4$LCat1 <- lencat(df4$len,w=0.1)
    +xtabs(~LCat1,data=df4)
     #> LCat1
     #> 0.1 0.7 1.2 1.8   2 2.2 2.3 2.7 2.9   3 3.1 3.2 3.3 3.4 3.6 3.9   4 4.2 4.3 4.4 
     #>   1   1   2   1   1   1   1   2   1   1   1   1   1   2   1   3   1   1   2   1 
     #> 4.5 4.7 4.8   5 5.1 5.3 5.6 6.1 6.4 6.6 6.7 6.8 7.4 7.7 8.2 8.4 8.8 8.9 
     #>   2   2   1   1   1   1   3   1   1   1   2   1   1   1   1   1   2   1 
    -
    -# length categories by 0.1 unit, but without missing categories
    -df4$LCat2 <- lencat(df4$len,w=0.1,as.fact=TRUE)
    -xtabs(~LCat2,data=df4)
    +
    +# length categories by 0.1 unit, but without missing categories
    +df4$LCat2 <- lencat(df4$len,w=0.1,as.fact=TRUE)
    +xtabs(~LCat2,data=df4)
     #> LCat2
     #> 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9   1 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9   2 
     #>   1   0   0   0   0   0   1   0   0   0   0   2   0   0   0   0   0   1   0   1 
    @@ -260,21 +284,21 @@ 

    Examples

    #> 1 0 0 1 0 1 2 1 0 0 0 0 0 1 0 0 1 0 0 0 #> 8.1 8.2 8.3 8.4 8.5 8.6 8.7 8.8 8.9 #> 0 1 0 1 0 0 0 2 1 - -# length categories by 2 unit -df4$LCat3 <- lencat(df4$len,w=2) -xtabs(~LCat3,data=df4) + +# length categories by 2 unit +df4$LCat3 <- lencat(df4$len,w=2) +xtabs(~LCat3,data=df4) #> LCat3 #> 0 2 4 6 8 #> 5 16 16 8 5 - -## A Fifth example -- with real data -# remove variables with "anu" and "radcap" just for simplicity -smb1 <- smb2 <- SMBassWB[,-c(8:20)] - -# 10 mm length classes - in default LCat variable -smb1$LCat10 <- lencat(smb1$lencap,w=10) -head(smb1) + +## A Fifth example -- with real data +# remove variables with "anu" and "radcap" just for simplicity +smb1 <- smb2 <- SMBassWB[,-c(8:20)] + +# 10 mm length classes - in default LCat variable +smb1$LCat10 <- lencat(smb1$lencap,w=10) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 #> 1 SMB WB E 1988 5 1 71 70 #> 2 SMB WB E 1988 3 1 64 60 @@ -282,16 +306,16 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 #> 5 SMB WB E 1988 6 1 72 70 #> 6 SMB WB E 1988 7 1 80 80 -xtabs(~LCat10,data=smb1) +xtabs(~LCat10,data=smb1) #> LCat10 #> 50 60 70 80 90 100 110 120 130 140 150 160 170 180 190 200 210 220 230 240 #> 3 4 7 5 7 20 21 23 23 23 35 22 7 12 21 21 23 15 19 18 #> 250 260 270 280 290 300 310 320 330 340 360 440 #> 19 23 17 22 17 4 4 4 2 1 2 1 - -# Same as previous but returned as factor so levels with no fish still seen -smb1$LCat10A <- lencat(smb1$lencap,w=10,as.fact=TRUE) -head(smb1) + +# Same as previous but returned as factor so levels with no fish still seen +smb1$LCat10A <- lencat(smb1$lencap,w=10,as.fact=TRUE) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 LCat10A #> 1 SMB WB E 1988 5 1 71 70 70 #> 2 SMB WB E 1988 3 1 64 60 60 @@ -299,16 +323,16 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 60 #> 5 SMB WB E 1988 6 1 72 70 70 #> 6 SMB WB E 1988 7 1 80 80 80 -xtabs(~LCat10A,data=smb1) +xtabs(~LCat10A,data=smb1) #> LCat10A #> 50 60 70 80 90 100 110 120 130 140 150 160 170 180 190 200 210 220 230 240 #> 3 4 7 5 7 20 21 23 23 23 35 22 7 12 21 21 23 15 19 18 #> 250 260 270 280 290 300 310 320 330 340 350 360 370 380 390 400 410 420 430 440 #> 19 23 17 22 17 4 4 4 2 1 0 2 0 0 0 0 0 0 0 1 - -# Same as previous but returned as a factor with unused levels dropped -smb1$LCat10B <- lencat(smb1$lencap,w=10,as.fact=TRUE,droplevels=TRUE) -head(smb1) + +# Same as previous but returned as a factor with unused levels dropped +smb1$LCat10B <- lencat(smb1$lencap,w=10,as.fact=TRUE,droplevels=TRUE) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 LCat10A LCat10B #> 1 SMB WB E 1988 5 1 71 70 70 70 #> 2 SMB WB E 1988 3 1 64 60 60 60 @@ -316,16 +340,16 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 60 60 #> 5 SMB WB E 1988 6 1 72 70 70 70 #> 6 SMB WB E 1988 7 1 80 80 80 80 -xtabs(~LCat10B,data=smb1) +xtabs(~LCat10B,data=smb1) #> LCat10B #> 50 60 70 80 90 100 110 120 130 140 150 160 170 180 190 200 210 220 230 240 #> 3 4 7 5 7 20 21 23 23 23 35 22 7 12 21 21 23 15 19 18 #> 250 260 270 280 290 300 310 320 330 340 360 440 #> 19 23 17 22 17 4 4 4 2 1 2 1 - -# 25 mm length classes - in custom variable name -smb1$LCat25 <- lencat(smb1$lencap,w=25) -head(smb1) + +# 25 mm length classes - in custom variable name +smb1$LCat25 <- lencat(smb1$lencap,w=25) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 LCat10A LCat10B LCat25 #> 1 SMB WB E 1988 5 1 71 70 70 70 50 #> 2 SMB WB E 1988 3 1 64 60 60 60 50 @@ -333,14 +357,14 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 60 60 50 #> 5 SMB WB E 1988 6 1 72 70 70 70 50 #> 6 SMB WB E 1988 7 1 80 80 80 80 75 -xtabs(~LCat25,data=smb1) +xtabs(~LCat25,data=smb1) #> LCat25 #> 50 75 100 125 150 175 200 225 250 275 300 325 350 425 #> 12 14 52 58 60 37 51 45 48 50 9 6 2 1 - -# using values from psdVal for Smallmouth Bass -smb1$PSDCat1 <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass")) -head(smb1) + +# using values from psdVal for Smallmouth Bass +smb1$PSDCat1 <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass")) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 LCat10A LCat10B LCat25 #> 1 SMB WB E 1988 5 1 71 70 70 70 50 #> 2 SMB WB E 1988 3 1 64 60 60 60 50 @@ -355,14 +379,14 @@

    Examples

    #> 4 0 #> 5 0 #> 6 0 -xtabs(~PSDCat1,data=smb1) +xtabs(~PSDCat1,data=smb1) #> PSDCat1 #> 0 180 280 350 430 #> 200 188 54 2 1 - -# add category names -smb1$PSDCat2 <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass"),use.names=TRUE) -head(smb1) + +# add category names +smb1$PSDCat2 <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass"),use.names=TRUE) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 LCat10A LCat10B LCat25 #> 1 SMB WB E 1988 5 1 71 70 70 70 50 #> 2 SMB WB E 1988 3 1 64 60 60 60 50 @@ -377,15 +401,15 @@

    Examples

    #> 4 0 substock #> 5 0 substock #> 6 0 substock -xtabs(~PSDCat2,data=smb1) +xtabs(~PSDCat2,data=smb1) #> PSDCat2 #> substock stock quality preferred memorable trophy #> 200 188 54 2 1 0 - -# same as above but drop the unused levels -smb1$PSDCat2A <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass"), - use.names=TRUE,droplevels=TRUE) -head(smb1) + +# same as above but drop the unused levels +smb1$PSDCat2A <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass"), + use.names=TRUE,droplevels=TRUE) +head(smb1) #> species lake gear yearcap fish agecap lencap LCat10 LCat10A LCat10B LCat25 #> 1 SMB WB E 1988 5 1 71 70 70 70 50 #> 2 SMB WB E 1988 3 1 64 60 60 60 50 @@ -400,11 +424,11 @@

    Examples

    #> 4 0 substock substock #> 5 0 substock substock #> 6 0 substock substock -xtabs(~PSDCat2A,data=smb1) +xtabs(~PSDCat2A,data=smb1) #> PSDCat2A #> substock stock quality preferred memorable #> 200 188 54 2 1 -str(smb1) +str(smb1) #> 'data.frame': 445 obs. of 14 variables: #> $ species : Factor w/ 1 level "SMB": 1 1 1 1 1 1 1 1 1 1 ... #> $ lake : Factor w/ 1 level "WB": 1 1 1 1 1 1 1 1 1 1 ... @@ -420,11 +444,11 @@

    Examples

    #> $ PSDCat1 : num 0 0 0 0 0 0 0 0 0 0 ... #> $ PSDCat2 : Factor w/ 6 levels "substock","stock",..: 1 1 1 1 1 1 1 1 1 1 ... #> $ PSDCat2A: Factor w/ 5 levels "substock","stock",..: 1 1 1 1 1 1 1 1 1 1 ... - -# same as above but not returned as a factor (returned as a character) -smb1$PSDcat2B <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass"), - use.names=TRUE,as.fact=FALSE) -str(smb1) + +# same as above but not returned as a factor (returned as a character) +smb1$PSDcat2B <- lencat(smb1$lencap,breaks=psdVal("Smallmouth Bass"), + use.names=TRUE,as.fact=FALSE) +str(smb1) #> 'data.frame': 445 obs. of 15 variables: #> $ species : Factor w/ 1 level "SMB": 1 1 1 1 1 1 1 1 1 1 ... #> $ lake : Factor w/ 1 level "WB": 1 1 1 1 1 1 1 1 1 1 ... @@ -441,11 +465,11 @@

    Examples

    #> $ PSDCat2 : Factor w/ 6 levels "substock","stock",..: 1 1 1 1 1 1 1 1 1 1 ... #> $ PSDCat2A: Factor w/ 5 levels "substock","stock",..: 1 1 1 1 1 1 1 1 1 1 ... #> $ PSDcat2B: chr "substock" "substock" "substock" "substock" ... - -## A Sixth example -- similar to fifth example but using the formula notation -# 10 mm length classes - in default LCat variable -smb2 <- lencat(~lencap,data=smb2,w=10) -head(smb2) + +## A Sixth example -- similar to fifth example but using the formula notation +# 10 mm length classes - in default LCat variable +smb2 <- lencat(~lencap,data=smb2,w=10) +head(smb2) #> species lake gear yearcap fish agecap lencap LCat #> 1 SMB WB E 1988 5 1 71 70 #> 2 SMB WB E 1988 3 1 64 60 @@ -453,10 +477,10 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 #> 5 SMB WB E 1988 6 1 72 70 #> 6 SMB WB E 1988 7 1 80 80 - -# 25 mm length classes - in custom variable name -smb2 <- lencat(~lencap,data=smb2,w=25,vname="LenCat25") -head(smb2) + +# 25 mm length classes - in custom variable name +smb2 <- lencat(~lencap,data=smb2,w=25,vname="LenCat25") +head(smb2) #> species lake gear yearcap fish agecap lencap LCat LenCat25 #> 1 SMB WB E 1988 5 1 71 70 50 #> 2 SMB WB E 1988 3 1 64 60 50 @@ -464,10 +488,10 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 50 #> 5 SMB WB E 1988 6 1 72 70 50 #> 6 SMB WB E 1988 7 1 80 80 75 - -# using values from psdVal for Smallmouth Bass -smb2 <- lencat(~lencap,data=smb2,breaks=psdVal("Smallmouth Bass"),vname="LenPsd") -head(smb2) + +# using values from psdVal for Smallmouth Bass +smb2 <- lencat(~lencap,data=smb2,breaks=psdVal("Smallmouth Bass"),vname="LenPsd") +head(smb2) #> species lake gear yearcap fish agecap lencap LCat LenCat25 LenPsd #> 1 SMB WB E 1988 5 1 71 70 50 0 #> 2 SMB WB E 1988 3 1 64 60 50 0 @@ -475,11 +499,11 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 50 0 #> 5 SMB WB E 1988 6 1 72 70 50 0 #> 6 SMB WB E 1988 7 1 80 80 75 0 - -# add category names -smb2 <- lencat(~lencap,data=smb2,breaks=psdVal("Smallmouth Bass"),vname="LenPsd2", - use.names=TRUE,droplevels=TRUE) -head(smb2) + +# add category names +smb2 <- lencat(~lencap,data=smb2,breaks=psdVal("Smallmouth Bass"),vname="LenPsd2", + use.names=TRUE,droplevels=TRUE) +head(smb2) #> species lake gear yearcap fish agecap lencap LCat LenCat25 LenPsd LenPsd2 #> 1 SMB WB E 1988 5 1 71 70 50 0 substock #> 2 SMB WB E 1988 3 1 64 60 50 0 substock @@ -487,7 +511,7 @@

    Examples

    #> 4 SMB WB E 1988 4 1 68 60 50 0 substock #> 5 SMB WB E 1988 6 1 72 70 50 0 substock #> 6 SMB WB E 1988 7 1 80 80 75 0 substock -str(smb2) +str(smb2) #> 'data.frame': 445 obs. of 11 variables: #> $ species : Factor w/ 1 level "SMB": 1 1 1 1 1 1 1 1 1 1 ... #> $ lake : Factor w/ 1 level "WB": 1 1 1 1 1 1 1 1 1 1 ... @@ -500,29 +524,25 @@

    Examples

    #> $ LenCat25: num 50 50 50 50 50 75 50 75 75 50 ... #> $ LenPsd : num 0 0 0 0 0 0 0 0 0 0 ... #> $ LenPsd2 : Factor w/ 5 levels "substock","stock",..: 1 1 1 1 1 1 1 1 1 1 ... - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/logbtcf.html b/docs/reference/logbtcf.html index e1b88124..7e9634e7 100644 --- a/docs/reference/logbtcf.html +++ b/docs/reference/logbtcf.html @@ -1,148 +1,150 @@ -Constructs the correction-factor used when back-transforming log-transformed values. — logbtcf • FSAConstructs the correction-factor used when back-transforming log-transformed values. — logbtcf • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Constructs the correction-factor used when back-transforming log-transformed values according to Sprugel (1983). Sprugel's main formula -- exp((syx^2)/2) -- is used when syx is estimated for natural log transformed data. A correction for any base is obtained by multiplying the syx term by log_e(base) to give exp(((log_e(base)*syx)^2)/2). This more general formula is implemented here (if, of course, the base is exp(1) then the general formula reduces to the original specific formula).

    -
    -
    logbtcf(obj, base = exp(1))
    +
    +

    Usage

    +
    logbtcf(obj, base = exp(1))
    -
    -

    Arguments

    +
    +

    Arguments

    obj

    An object from lm.

    + +
    base

    A single numeric that indicates the base of the logarithm used.

    +
    -
    -

    Value

    -

    A numeric value that is the correction factor according to Sprugel (1983).

    +
    +

    Value

    + + +

    A numeric value that is the correction factor according to Sprugel (1983).

    -
    -

    References

    +
    +

    References

    Sprugel, D.G. 1983. Correcting for bias in log-transformed allometric equations. Ecology 64:209-210.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    # toy data
    -df <- data.frame(y=rlnorm(10),x=rlnorm(10))
    -df$logey <- log(df$y)
    -df$log10y <- log10(df$y)
    -df$logex <- log(df$x)
    -df$log10x <- log10(df$x)
    -
    -# model and predictions on loge scale
    -lme <- lm(logey~logex,data=df)
    -( ploge <- predict(lme,data.frame(logex=log(10))) )
    +    
    +

    Examples

    +
    # toy data
    +df <- data.frame(y=rlnorm(10),x=rlnorm(10))
    +df$logey <- log(df$y)
    +df$log10y <- log10(df$y)
    +df$logex <- log(df$x)
    +df$log10x <- log10(df$x)
    +
    +# model and predictions on loge scale
    +lme <- lm(logey~logex,data=df)
    +( ploge <- predict(lme,data.frame(logex=log(10))) )
     #>        1 
     #> 1.691612 
    -( pe <- exp(ploge) )
    +( pe <- exp(ploge) )
     #>        1 
     #> 5.428224 
    -( cfe <- logbtcf(lme) )
    +( cfe <- logbtcf(lme) )
     #> [1] 1.381993
    -( cpe <- cfe*pe )
    +( cpe <- cfe*pe )
     #>        1 
     #> 7.501769 
    -
    -# model and predictions on log10 scale
    -lm10 <- lm(log10y~log10x,data=df)
    -plog10 <- predict(lm10,data.frame(log10x=log10(10)))
    -p10 <- 10^(plog10)
    -( cf10 <- logbtcf(lm10,10) )
    +
    +# model and predictions on log10 scale
    +lm10 <- lm(log10y~log10x,data=df)
    +plog10 <- predict(lm10,data.frame(log10x=log10(10)))
    +p10 <- 10^(plog10)
    +( cf10 <- logbtcf(lm10,10) )
     #> [1] 1.381993
    -( cp10 <- cf10*p10 )
    +( cp10 <- cf10*p10 )
     #>        1 
     #> 7.501769 
    -
    -# cfe and cf10, cpe and cp10 should be equal
    -all.equal(cfe,cf10)
    +
    +# cfe and cf10, cpe and cp10 should be equal
    +all.equal(cfe,cf10)
     #> [1] TRUE
    -all.equal(cpe,cp10)
    +all.equal(cpe,cp10)
     #> [1] TRUE
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/lwCompPreds.html b/docs/reference/lwCompPreds.html index ff9ba9df..eff40144 100644 --- a/docs/reference/lwCompPreds.html +++ b/docs/reference/lwCompPreds.html @@ -1,153 +1,193 @@ -Constructs plots of predicted weights at given lengths among different groups. — lwCompPreds • FSAConstructs plots of predicted weights at given lengths among different groups. — lwCompPreds • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Constructs plots of predicted weights at given lengths among different groups. These plots allow the user to explore differences in predicted weights at a variety of lengths when the weight-length relationship is not the same across a variety of groups.

    -
    -
    lwCompPreds(
    -  object,
    -  lens = NULL,
    -  qlens = c(0.05, 0.25, 0.5, 0.75, 0.95),
    -  qlens.dec = 1,
    -  base = exp(1),
    -  interval = c("confidence", "prediction", "both"),
    -  center.value = 0,
    -  lwd = 1,
    -  connect.preds = TRUE,
    -  show.preds = FALSE,
    -  col.connect = "gray70",
    -  ylim = NULL,
    -  main.pre = "Length==",
    -  cex.main = 0.8,
    -  xlab = "Groups",
    -  ylab = "Predicted Weight",
    -  yaxs = "r",
    -  rows = round(sqrt(num)),
    -  cols = ceiling(sqrt(num))
    -)
    +
    +

    Usage

    +
    lwCompPreds(
    +  object,
    +  lens = NULL,
    +  qlens = c(0.05, 0.25, 0.5, 0.75, 0.95),
    +  qlens.dec = 1,
    +  base = exp(1),
    +  interval = c("confidence", "prediction", "both"),
    +  center.value = 0,
    +  lwd = 1,
    +  connect.preds = TRUE,
    +  show.preds = FALSE,
    +  col.connect = "gray70",
    +  ylim = NULL,
    +  main.pre = "Length==",
    +  cex.main = 0.8,
    +  xlab = "Groups",
    +  ylab = "Predicted Weight",
    +  yaxs = "r",
    +  rows = round(sqrt(num)),
    +  cols = ceiling(sqrt(num))
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    An lm object (i.e., returned from fitting a model with lm). This model should have log(weight) as the response and log(length) as the explanatory covariate and an explanatory factor variable that describes the different groups.

    + +
    lens

    A numeric vector that indicates the lengths at which the weights should be predicted.

    + +
    qlens

    A numeric vector that indicates the quantiles of lengths at which weights should be predicted. This is ignored if lens is non-null.

    + +
    qlens.dec

    A single numeric that identifies the decimal place that the lengths derived from qlens should be rounded to (Default is 1).

    + +
    base

    A single positive numeric value that indicates the base of the logarithm used in the lm object in object. The default is exp(1), or the value e.

    + +
    interval

    A single string that indicates whether to plot confidence (="confidence"), prediction (="prediction"), or both (="both") intervals.

    + +
    center.value

    A single numeric value that indicates the log length used if the log length data was centered when constructing object.

    + +
    lwd

    A single numeric that indicates the line width to be used for the confidence and prediction interval lines (if not interval="both") and the prediction connections line. If interval="both" then the width of the prediction interval will be one less than this value so that the CI and PI appear different.

    + +
    connect.preds

    A logical that indicates whether the predicted values should be connected with a line across groups or not.

    + +
    show.preds

    A logical that indicates whether the predicted values should be plotted with a point for each group or not.

    + +
    col.connect

    A color to use for the line that connects the predicted values (if connect.preds=TRUE).

    + +
    ylim

    A numeric vector of length two that indicates the limits of the y-axis to be used for each plot. If null then limits will be chosen for each graph individually.

    + +
    main.pre

    A character string to be used as a prefix for the main title. See details.

    + +
    cex.main

    A numeric value for the character expansion of the main title. See details.

    + +
    xlab

    A single string for labeling the x-axis.

    + +
    ylab

    A single string for labeling the y-axis.

    + +
    yaxs

    A single string that indicates how the y-axis is formed. See par for more details.

    + +
    rows

    A single numeric that contains the number of rows to use on the graphic.

    + +
    cols

    A single numeric that contains the number of columns to use on the graphic.

    +
    -
    -

    Value

    -

    None. However, a plot is produced.

    +
    +

    Value

    + + +

    None. However, a plot is produced.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    7-Weight-Length.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    # add log length and weight data to ChinookArg data
    -ChinookArg$logtl <- log(ChinookArg$tl)
    -ChinookArg$logwt <- log(ChinookArg$w)
    -# fit model to assess equality of slopes
    -lm1 <- lm(logwt~logtl*loc,data=ChinookArg)
    -anova(lm1)
    +    
    +

    Examples

    +
    # add log length and weight data to ChinookArg data
    +ChinookArg$logtl <- log(ChinookArg$tl)
    +ChinookArg$logwt <- log(ChinookArg$w)
    +# fit model to assess equality of slopes
    +lm1 <- lm(logwt~logtl*loc,data=ChinookArg)
    +anova(lm1)
     #> Analysis of Variance Table
     #> 
     #> Response: logwt
    @@ -158,55 +198,51 @@ 

    Examples

    #> Residuals 106 10.864 0.102 #> --- #> Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 - -# set graphing parameters so that the plots will look decent -op <- par(mar=c(3.5,3.5,1,1),mgp=c(1.8,0.4,0),tcl=-0.2) -# show predicted weights (w/ CI) at the default quantile lengths -lwCompPreds(lm1,xlab="Location") + +# set graphing parameters so that the plots will look decent +op <- par(mar=c(3.5,3.5,1,1),mgp=c(1.8,0.4,0),tcl=-0.2) +# show predicted weights (w/ CI) at the default quantile lengths +lwCompPreds(lm1,xlab="Location") -# show predicted weights (w/ CI) at the quartile lengths -lwCompPreds(lm1,xlab="Location",qlens=c(0.25,0.5,0.75)) +# show predicted weights (w/ CI) at the quartile lengths +lwCompPreds(lm1,xlab="Location",qlens=c(0.25,0.5,0.75)) -# show predicted weights (w/ CI) at certain lengths -lwCompPreds(lm1,xlab="Location",lens=c(60,90,120,150)) +# show predicted weights (w/ CI) at certain lengths +lwCompPreds(lm1,xlab="Location",lens=c(60,90,120,150)) -# show predicted weights (w/ just PI) at certain lengths -lwCompPreds(lm1,xlab="Location",lens=c(60,90,120,150),interval="prediction") +# show predicted weights (w/ just PI) at certain lengths +lwCompPreds(lm1,xlab="Location",lens=c(60,90,120,150),interval="prediction") -lwCompPreds(lm1,xlab="Location",lens=c(60,90,120,150),connect.preds=FALSE,show.preds=TRUE) +lwCompPreds(lm1,xlab="Location",lens=c(60,90,120,150),connect.preds=FALSE,show.preds=TRUE) - -# fit model with a different base (plot should be the same as the first example) -ChinookArg$logtl <- log10(ChinookArg$tl) -ChinookArg$logwt <- log10(ChinookArg$w) -lm1 <- lm(logwt~logtl*loc,data=ChinookArg) -lwCompPreds(lm1,base=10,xlab="Location") + +# fit model with a different base (plot should be the same as the first example) +ChinookArg$logtl <- log10(ChinookArg$tl) +ChinookArg$logwt <- log10(ChinookArg$w) +lm1 <- lm(logwt~logtl*loc,data=ChinookArg) +lwCompPreds(lm1,base=10,xlab="Location") -## return graphing parameters to 
original state -par(op) - +## return graphing parameters to original state +par(op) +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/metaM.html b/docs/reference/metaM.html index d8dc69a3..460e8c40 100644 --- a/docs/reference/metaM.html +++ b/docs/reference/metaM.html @@ -1,127 +1,159 @@ -Estimate natural mortality from a variety of empirical methods. — Mmethods • FSAEstimate natural mortality from a variety of empirical methods. — Mmethods • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Several methods can be used to estimated natural mortality (M) from other types of data, including parameters from the von Bertalanffy growth equation, maximum age, and temperature. These relationships have been developed from meta-analyses of a large number of populations. Several of these methods are implemented in this function.

    -
    -
    Mmethods(what = c("all", "tmax", "K", "Hoenig", "Pauly"))
    -
    -metaM(
    -  method = Mmethods(),
    -  justM = TRUE,
    -  tmax = NULL,
    -  K = NULL,
    -  Linf = NULL,
    -  t0 = NULL,
    -  b = NULL,
    -  L = NULL,
    -  Temp = NULL,
    -  t50 = NULL,
    -  Winf = NULL
    -)
    -
    -# S3 method for metaM
    -print(x, digits = 4, ...)
    +
    +

    Usage

    +
    Mmethods(what = c("all", "tmax", "K", "Hoenig", "Pauly"))
    +
    +metaM(
    +  method = Mmethods(),
    +  justM = TRUE,
    +  tmax = NULL,
    +  K = NULL,
    +  Linf = NULL,
    +  t0 = NULL,
    +  b = NULL,
    +  L = NULL,
    +  Temp = NULL,
    +  t50 = NULL,
    +  Winf = NULL
    +)
    +
    +# S3 method for metaM
    +print(x, digits = 4, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    what

    A string that indicates what grouping of methods to return. Defaults to returning all methods.

    + +
    method

    A string that indicates which method or equation to use. See details.

    + +
    justM

    A logical that indicates whether just the estimate of M (TRUE; Default) or a more descriptive list should be returned.

    + +
    tmax

    The maximum age for the population of fish.

    + +
    K

    The Brody growth coefficient from the fit of the von Bertalanffy growth function.

    + +
    Linf

    The asymptotic mean length (cm) from the fit of the von Bertalanffy growth function.

    + +
    t0

    The x-intercept from the fit of the von Bertalanffy growth function.

    + +
    b

    The exponent from the weight-length relationship (slope from the logW-logL relationship).

    + +
    L

    The body length of the fish (cm).

    + +
    Temp

    The temperature experienced by the fish (C).

    + +
    t50

    The age (time) when half the fish in the population are mature.

    + +
    Winf

    The asymptotic mean weight (g) from the fit of the von Bertalanffy growth function.

    + +
    x

    A metaM object returned from metaM when justM=FALSE.

    + +
    digits

    A numeric that controls the number of digits printed for the estimate of M.

    + +
    ...

    Additional arguments for methods. Not implemented.

    +
    -
    -

    Value

    -

    Mmethods returns a character vector with a list of methods. If only one method is chosen then metaM returns a single numeric if justM=TRUE or, otherwise, a metaM object that is a list with the following items:

    • method: The name for the method within the function (as given in method).

    • +
      +

      Value

      + + +

      Mmethods returns a character vector with a list of methods. If only one method is chosen then metaM returns a single numeric if justM=TRUE or, otherwise, a metaM object that is a list with the following items:

      • method: The name for the method within the function (as given in method).

      • name: A more descriptive name for the method.

      • givens: A vector of values required by the method to estimate M.

      • M: The estimated natural mortality rate.

      If multiple methods are chosen then a data.frame is returned with the method name abbreviation in the method variable and the associated estimated M in the M variable.

      -
      -

      Details

      +
      +

      Details

      One of several methods is chosen with method. The available methods can be seen with Mmethods() and are listed below with a brief description of where the equation came from. The sources (listed below) should be consulted for more specific information.

      • method="HoenigNLS": The “modified Hoenig equation derived with a non-linear model” as described in Then et al. (2015) on the third line of Table 3. This method was the preferred method suggested by Then et al. (2015). Requires only tmax.

      • method="PaulyLNoT": The “modified Pauly length equation” as described on the sixth line of Table 3 in Then et al. (2015). Then et al. (2015) suggested that this is the preferred model if maximum age (tmax) information was not available. Requires K and Linf.

      • method="PaulyL": The “Pauly (1980) equation using fish lengths” from his equation 11. This is the most commonly used method in the literature. Note that Pauly used common logarithms as used here but the model is often presented in other sources with natural logarithms. Requires K, Linf, and T.

      • @@ -142,18 +174,18 @@

        Details

      • method="RikhterEfanov1": The “Rikhter and Efanov (1976) equation (#2)” as given in the second column of page 541 of Kenchington (2014) and in Table 6.4 of Miranda and Bettoli (2007). Requires only t50.

      • method="RikhterEfanov2": The “Rikhter and Efanov (1976) equation (#1)” as given in the first column of page 541 of Kenchington (2014). Requires t50, K, t0, and b.

      -
      -

      Testing

      +
      +

      Testing

      Kenchington (2014) provided life history parameters for several stocks and used many models to estimate M. I checked the calculations for the PaulyL, PaulyW, HoenigO for Hgroup="all" and Hgroup="fish", HoenigO2 for Hgroup="all" and Hgroup="fish", "JensenK1", "Gislason", "AlversonCarney", "Charnov", "ZhangMegrey", "RikhterEfanov1", and "RikhterEfanov2" methods for three stocks. All results perfectly matched Kenchington's results for Chesapeake Bay Anchovy and Rio Formosa Seahorse. For the Norwegian Fjord Lanternfish, all results perfectly matched Kenchington's results except for when Hgroup="fish" for both HoenigO and HoenigO2.

      Results for the Rio Formosa Seahorse data were also tested against results from M.empirical from fishmethods for the PaulyL, PaulyW, HoenigO for Hgroup="all" and Hgroup="fish", "Gislason", and "AlversonCarney" methods (the only methods in common between the two packages). All results matched perfectly.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      11-Mortality.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Alverson, D.L. and M.J. Carney. 1975. A graphic review of the growth and decay of population cohorts. Journal du Conseil International pour l'Exploration de la Mer. 36:133-143.

      Charnov, E.L., H. Gislason, and J.G. Pope. 2013. Evolutionary assembly rules for fish life histories. Fish and Fisheries. 14:213-224.

      Gislason, H., N. Daan, J.C. Rice, and J.G. Pope. 2010. Size, growth, temperature and the natural mortality of marine fish. Fish and Fisheries 11:149-158.

      @@ -167,19 +199,19 @@

      References

      Then, A.Y., J.M. Hoenig, N.G. Hall, and D.A. Hewitt. 2015. Evaluating the predictive performance of empirical estimators of natural mortality rate using information on over 200 fish species. ICES Journal of Marine Science. 72:82-92.

      Zhang, C-I and B.A. Megrey. 2006. A revised Alverson and Carney model for estimating the instantaneous rate of natural mortality. Transactions of the American Fisheries Society. 135-620-633. [Was (is?) from http://www.pmel.noaa.gov/foci/publications/2006/zhan0531.pdf.]

      -
      -

      See also

      +
      +

      See also

      See M.empirical in fishmethods for similar functionality.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## List names for available methods
      -Mmethods()
      +    
      +

      Examples

      +
      ## List names for available methods
      +Mmethods()
       #>  [1] "HoenigNLS"      "HoenigO"        "HoenigOF"       "HoenigOM"      
       #>  [5] "HoenigOC"       "HoenigO2"       "HoenigO2F"      "HoenigO2M"     
       #>  [9] "HoenigO2C"      "HoenigLM"       "HewittHoenig"   "tmax1"         
      @@ -187,60 +219,60 @@ 

      Examples

      #> [17] "K2" "JensenK1" "JensenK2" "Gislason" #> [21] "AlversonCarney" "Charnov" "ZhangMegreyD" "ZhangMegreyP" #> [25] "RikhterEfanov1" "RikhterEfanov2" -Mmethods("tmax") +Mmethods("tmax") #> [1] "tmax1" "HoenigNLS" "HoenigO" "HoenigOF" "HoenigOM" #> [6] "HoenigOC" "HoenigO2" "HoenigO2F" "HoenigO2M" "HoenigO2C" #> [11] "HoenigLM" "HewittHoenig" - -## Simple Examples -metaM("tmax",tmax=20) + +## Simple Examples +metaM("tmax",tmax=20) #> [1] 0.25545 -metaM("tmax",tmax=20,justM=FALSE) +metaM("tmax",tmax=20,justM=FALSE) #> M=0.2554 as estimated with Then et al. (2015) tmax equation #> with givens: tmax=20 -metaM("HoenigNLS",tmax=20) +metaM("HoenigNLS",tmax=20) #> [1] 0.3150387 -metaM("HoenigNLS",tmax=20,justM=FALSE) +metaM("HoenigNLS",tmax=20,justM=FALSE) #> M=0.315 as estimated with Then et al. (2015) Hoenig (NLS) equation #> with givens: tmax=20 - -## Example Patagonian Sprat ... from Table 2 in Cerna et al. (2014) -## http://www.scielo.cl/pdf/lajar/v42n3/art15.pdf -Temp <- 11 -Linf <- 17.71 -K <- 0.78 -t0 <- -0.46 -tmax <- t0+3/K -t50 <- t0-(1/K)*log(1-13.5/Linf) -metaM("RikhterEfanov1",t50=t50) + +## Example Patagonian Sprat ... from Table 2 in Cerna et al. 
(2014) +## http://www.scielo.cl/pdf/lajar/v42n3/art15.pdf +Temp <- 11 +Linf <- 17.71 +K <- 0.78 +t0 <- -0.46 +tmax <- t0+3/K +t50 <- t0-(1/K)*log(1-13.5/Linf) +metaM("RikhterEfanov1",t50=t50) #> [1] 1.050009 -metaM("PaulyL",K=K,Linf=Linf,Temp=Temp) +metaM("PaulyL",K=K,Linf=Linf,Temp=Temp) #> [1] 1.14058 -metaM("PaulyL",K=K,Linf=Linf,Temp=Temp,justM=FALSE) +metaM("PaulyL",K=K,Linf=Linf,Temp=Temp,justM=FALSE) #> M=1.1406 as estimated with Pauly (1980) length equation #> with givens: K=0.78, Linf=17.71, Temp=11 -metaM("HoenigNLS",tmax=tmax) +metaM("HoenigNLS",tmax=tmax) #> [1] 1.602862 -metaM("HoenigO",tmax=tmax) +metaM("HoenigO",tmax=tmax) #> [1] 1.274125 -metaM("HewittHoenig",tmax=tmax) +metaM("HewittHoenig",tmax=tmax) #> [1] 1.246252 -metaM("AlversonCarney",K=K,tmax=tmax) +metaM("AlversonCarney",K=K,tmax=tmax) #> [1] 1.35398 - -## Example of multiple calculations -metaM(c("RikhterEfanov1","PaulyL","HoenigO","HewittHoenig","AlversonCarney"), - K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) + +## Example of multiple calculations +metaM(c("RikhterEfanov1","PaulyL","HoenigO","HewittHoenig","AlversonCarney"), + K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) #> method M #> 1 RikhterEfanov1 1.050009 #> 2 PaulyL 1.140580 #> 3 HoenigO 1.274125 #> 4 HewittHoenig 1.246252 #> 5 AlversonCarney 1.353980 - -## Example of multiple methods using Mmethods -# select some methods -metaM(Mmethods()[-c(15,20,22:24,26)],K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) + +## Example of multiple methods using Mmethods +# select some methods +metaM(Mmethods()[-c(15,20,22:24,26)],K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) #> method M #> 1 HoenigNLS 1.6028619 #> 2 HoenigO 1.2741252 @@ -262,8 +294,8 @@

      Examples

      #> 18 JensenK2 1.3566000 #> 19 AlversonCarney 1.3539801 #> 20 RikhterEfanov1 1.0500095 -# select just the Hoenig methods -metaM(Mmethods("Hoenig"),K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) +# select just the Hoenig methods +metaM(Mmethods("Hoenig"),K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) #> method M #> 1 HoenigNLS 1.6028619 #> 2 HoenigO 1.2741252 @@ -276,29 +308,25 @@

      Examples

      #> 9 HoenigO2C 1.4625421 #> 10 HoenigLM 1.6243510 #> 11 HewittHoenig 1.2462517 - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/mrClosed.html b/docs/reference/mrClosed.html index 4e7e890c..ccbfcf64 100644 --- a/docs/reference/mrClosed.html +++ b/docs/reference/mrClosed.html @@ -1,197 +1,259 @@ -Estimate initial population size for single or multiple census mark-recapture data. — mrClosed • FSAEstimate initial population size for single or multiple census mark-recapture data. — mrClosed • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Estimates of the initial population size, along with associated confidence intervals, are constructed from single or multiple census mark-recapture data using a variety of methods. For single census data, the initial population size (N) is estimated from the number of marked animals from a first sample (M), number of captured animals in a second sample (n), and the number of recaptured marked animals in the second sample (m) using either the ‘naive’ Petersen method or Chapman, Ricker, or Bailey modifications of the Petersen method. Single census data can also be separated by group (e.g., size class) to estimate the initial population size by class and for the overall population size. For multiple census data, the initial population size is estimated from the number of captured animals (n), number of recaptured marked animals (m), the number of marked animals that are marked and returned to the population (R), or the number of extant marked animals prior to the sample (M) on each of several samples using either the Schnabel (1938) or Schumacher-Eschmeyer (1943) method.

    -
    -
    mrClosed(
    -  M = NULL,
    -  n = NULL,
    -  m = NULL,
    -  R = NULL,
    -  method = c("Petersen", "Chapman", "Ricker", "Bailey", "Schnabel",
    -    "SchumacherEschmeyer"),
    -  labels = NULL,
    -  chapman.mod = TRUE
    -)
    -
    -# S3 method for mrClosed1
    -summary(
    -  object,
    -  digits = 0,
    -  incl.SE = FALSE,
    -  incl.all = TRUE,
    -  verbose = FALSE,
    -  ...
    -)
    -
    -# S3 method for mrClosed1
    -confint(
    -  object,
    -  parm = NULL,
    -  level = conf.level,
    -  conf.level = 0.95,
    -  digits = 0,
    -  type = c("suggested", "binomial", "hypergeometric", "normal", "Poisson"),
    -  bin.type = c("wilson", "exact", "asymptotic"),
    -  poi.type = c("exact", "daly", "byar", "asymptotic"),
    -  incl.all = TRUE,
    -  verbose = FALSE,
    -  ...
    -)
    -
    -# S3 method for mrClosed2
    -summary(object, digits = 0, verbose = FALSE, ...)
    -
    -# S3 method for mrClosed2
    -confint(
    -  object,
    -  parm = NULL,
    -  level = conf.level,
    -  conf.level = 0.95,
    -  digits = 0,
    -  type = c("suggested", "normal", "Poisson"),
    -  poi.type = c("exact", "daly", "byar", "asymptotic"),
    -  verbose = FALSE,
    -  ...
    -)
    -
    -# S3 method for mrClosed2
    -plot(
    -  x,
    -  pch = 19,
    -  col.pt = "black",
    -  xlab = "Marked in Population",
    -  ylab = "Prop. Recaptures in Sample",
    -  loess = FALSE,
    -  lty.loess = 2,
    -  lwd.loess = 1,
    -  col.loess = "gray20",
    -  trans.loess = 10,
    -  span = 0.9,
    -  ...
    -)
    +
    +

    Usage

    +
    mrClosed(
    +  M = NULL,
    +  n = NULL,
    +  m = NULL,
    +  R = NULL,
    +  method = c("Petersen", "Chapman", "Ricker", "Bailey", "Schnabel",
    +    "SchumacherEschmeyer"),
    +  labels = NULL,
    +  chapman.mod = TRUE
    +)
    +
    +# S3 method for mrClosed1
    +summary(
    +  object,
    +  digits = 0,
    +  incl.SE = FALSE,
    +  incl.all = TRUE,
    +  verbose = FALSE,
    +  ...
    +)
    +
    +# S3 method for mrClosed1
    +confint(
    +  object,
    +  parm = NULL,
    +  level = conf.level,
    +  conf.level = 0.95,
    +  digits = 0,
    +  type = c("suggested", "binomial", "hypergeometric", "normal", "Poisson"),
    +  bin.type = c("wilson", "exact", "asymptotic"),
    +  poi.type = c("exact", "daly", "byar", "asymptotic"),
    +  incl.all = TRUE,
    +  verbose = FALSE,
    +  ...
    +)
    +
    +# S3 method for mrClosed2
    +summary(object, digits = 0, verbose = FALSE, ...)
    +
    +# S3 method for mrClosed2
    +confint(
    +  object,
    +  parm = NULL,
    +  level = conf.level,
    +  conf.level = 0.95,
    +  digits = 0,
    +  type = c("suggested", "normal", "Poisson"),
    +  poi.type = c("exact", "daly", "byar", "asymptotic"),
    +  verbose = FALSE,
    +  ...
    +)
    +
    +# S3 method for mrClosed2
    +plot(
    +  x,
    +  pch = 19,
    +  col.pt = "black",
    +  xlab = "Marked in Population",
    +  ylab = "Prop. Recaptures in Sample",
    +  loess = FALSE,
    +  lty.loess = 2,
    +  lwd.loess = 1,
    +  col.loess = "gray20",
    +  trans.loess = 10,
    +  span = 0.9,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    M

    A numeric representing the number of marked fish from the first sample (single-census), an object from capHistSum() (single- or multiple-census), or numeric vector of marked fish prior to ith samples (multiple-census).

    + +
    n

    A numeric representing the number of captured fish in the second sample (single-census) or numeric vector of captured fish in ith sample (multiple-census).

    + +
    m

    A numeric representing the number of recaptured (marked) fish in the second sample (single-census) or numeric vector of recaptured (marked) fish in ith sample (multiple-census).

    + +
    R

    A numeric vector representing the number of marked fish returned to the population (multiple-census). Note that several references use the number of “new” marks returned to the population rather than the “total” number of marks returned to the population that is used here.

    + +
    method

    A single string that identifies the type of calculation method to use in the main function.

    + +
    labels

    A character or character vector used to label the rows of the resulting output matrix when using a single census method separated by groups. Must be the same length as M, n, and m. Defaults to upper-case letters if no values are given.

    + +
    chapman.mod

    A logical that represents whether the Chapman modification should be used (=TRUE, default) or not (=FALSE) when performing the Schnabel multiple census method.

    + +
    object, x

    An mrClosed1 or mrClosed2 object.

    + +
    digits

    The number of decimal digits to round the population estimates to. If incl.SE=TRUE then SE will be rounded to one more decimal place then given in digits.

    + +
    incl.SE

    A logical that indicates whether the results should include the calculated SE value. See details.

    + +
    incl.all

    A logical that indicates whether an overall population estimate should be computed when using a single census method that has been separated into sub-groups. See details.

    + +
    verbose

    A logical that indicates whether a reminder of the inputted values and what type of method was used should be printed with the summary and confidence interval results.

    + +
    ...

    Additional arguments for methods.

    + +
    parm

    Not used here (included in confint generic).

    + +
    level

    Same as conf.level but used for compatibility with confint generic.

    + +
    conf.level

    A numeric representing the level of confidence to use for confidence intervals.

    + +
    type

    A single string that identifies the distribution to use when constructing confidence intervals in confint. See details.

    + +
    bin.type

    A string that identifies the method used to construct binomial confidence intervals (default is "wilson"). This is only used if type="binomial" in confint. See details of binCI.

    + +
    poi.type

    A string that identifies the method used to construct Poisson confidence intervals (default is "exact"). This is only used if type="Poisson" in confint. See details of poiCI.

    + +
    pch

    A numeric used to indicate the type of plotting character.

    + +
    col.pt

    a string used to indicate the color of the plotted points.

    + +
    xlab

    A label for the x-axis.

    + +
    ylab

    A label for the y-axis.

    + +
    loess

    A logical that indicates if a loess smoother line (and approximate 95% confidence band) is fit to and shown on plot.

    + +
    lty.loess

    A single numeric used to indicate the type of line used for the loess line.

    + +
    lwd.loess

    A single numeric used to indicate the line width of the loess line.

    + +
    col.loess

    A single string used to indicate the color of the loess line.

    + +
    trans.loess

    A single numeric that indicates how transparent the loess band should be (larger numbers are more transparent).

    + +
    span

    A single numeric that controls the degree of smoothing. Values closer to 1 are more smooth.

    +
    -
    -

    Value

    -

    A list with the following items

    • M The number of marked fish from the first sample that was provided.

    • +
      +

      Value

      + + +

      A list with the following items

      +

      +
      • M The number of marked fish from the first sample that was provided.

      • n The number of captured fish in the second sample that was provided.

      • m The number of recaptured (marked) fish in the second sample that was provided.

      • M1 The adjusted (depending on type) number of marked fish from the first sample.

      • @@ -203,8 +265,8 @@

        Value

      • N The estimated initial population size.

      • labels Labels for the rows of summary matrix.

      -
      -

      Details

      +
      +

      Details

      For single census data, the following methods can be used:

      • method="Petersen". The ‘naive’ Petersen as computed using equation 2.1 from Krebs (1989).

      • method="Chapman". The Chapman (1951) modification of the Petersen method as computed using equation 2.2 from Krebs (1989).

      • method="Ricker". The Ricker (1975) modification of the Petersen as computed using equation 3.7 from Ricker (1975). This is basically the same method="Chapman" except that Ricker (1975) did NOT subtract a 1 from the answer in the final step. Thus, the estimate from method="Chapman" will always be one less than the estimate from method="Ricker".

      • @@ -224,96 +286,96 @@

        Details

        Confidence intervals for the initial population size using multiple census methods can be constructed using the normal or Poisson distributions for the Schnabel method or the normal distribution for the Schumacher-Eschmeyer method as chosen with type=. If type="suggested" then the type of confidence interval suggested by the rule on p. 32 of Krebs (1989) is used (for the Schnabel method). If type="Poisson" for the Schnabel method then a confidence interval for the sum of m is computed with poiCI and the end points are substituted into the Schnabel equation to produce a CI for the population size. If type="normal" for the Schnabel method then the standard error for the inverse of the population estimate is computed as the square root of equation 2.11 from Krebs (1989) or equation 3.16 from Ricker (1975). The standard error for the Schumacher-Eschmeyer method is for the inverse of the population estimate and is computed with equation 2.14 from Krebs (1989) [Note that the divisor in Krebs (1989) is different than the divisor in equation 3.12 in Ricker (1975), but is consistent with equation 4.17 in Seber (2002).] The confidence interval for the inverse population estimate is constructed from the inverse population estimate plus/minus a t critical value times the standard error for the inverse population estimate. The t critical value uses the number of samples minus 1 for the Schnabel method and the number of samples minus 2 when for the Schumacher-Eschmeyer method according to p. 32 of Krebs (1989) (note that this is different than what Ricker (1975) does). Finally, the confidence interval for the population estimate is obtained by inverting the confidence interval for the inverse population estimate. 
Note that confidence intervals for the population size when type="normal" may contain negative values (for the upper value) when the population estimate is relatively large and the number of samples is small (say, three) because the intervals are originally constructed on the inverted population estimate and they use the t-distribution.

        The plot can be used to identify assumption violations in the Schnabel and Schumacher-Eschmeyer methods (an error will be returned if used with any of the other methods). If the assumptions ARE met then the plot of the proportion of marked fish in a sample versus the cumulative number of marked fish should look linear. A loess line (with approximate 95% confidence bands) can be added to aid interpretation with loess=TRUE. Note, however, that adding the loess line may return a number of warnings or produce a non-informative line if the number of samples is small (<8).

      -
      -

      Testing

      +
      +

      Testing

      The results from the single census methods have had the following checks. The population estimates for all methods match reputable sources. The SE for the Chapman and Bailey methods match the results from mrN.single in fishmethods. The CI for the Petersen, Chapman, and Bailey methods partially match (are within 1%).

      The results for the multiple census methods have had the following checks. The population estimates for both methods match reputable sources. The intermediate calculations for both methods match those in Krebs (1989). The confidence interval for the Schnabel method using the Poisson distribution does NOT match Krebs (1989). This appears to be a difference in the use of poiCI here versus distributional tables in Krebs (i.e., the difference appears to be completely in the critical values from the Poisson distribution). The confidence intervals for the Schnabel method using the normal or the Poisson distribution do NOT match Ricker (1975), but there is not enough information in Ricker to determine why (it is likely due to numerical differences on the inverse scale). The confidence interval for the Schumacher-Eschmeyer method does match Krebs (1989) but not Ricker (1975). The Ricker result may be due to different df as noted above.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      9-Abundance from Capture-Recapture Data.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Krebs, C.J. 1989. Ecological Methodology. Addison-Welsey Educational Publishing.

      Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]

      Seber, G.A.F. 2002. The Estimation of Animal Abundance and Related Parameters. Edward Arnold, second edition.

      Schnabel, Z.E. 1938. The estimation of the total fish population of a lake. American Mathematician Monthly, 45:348-352.

      Schumacher, F.X. and R.W. Eschmeyer. 1943. The estimation of fish populations in lakes and ponds. Journal of the Tennessee Academy of Sciences, 18:228-249.

      -
      -

      See also

      -

      See capHistSum for generating input data from capture histories. See poiCI, binCI, and hyperCI for specifics on functions used in confidence interval construction. See mrOpen for handling mark-recapture data in an open population. See SunfishIN in FSAdata for an example to test matching of results with Ricker (1975)' See mrN.single and schnabel in fishmethods for similar functionality.

      +
      +

      See also

      +

      See capHistSum for generating input data from capture histories. See poiCI, binCI, and hyperCI for specifics on functions used in confidence interval construction. See mrOpen for handling mark-recapture data in an open population. See SunfishIN in FSAdata for an example to test matching of results with Ricker (1975). See mrN.single and schnabel in fishmethods for similar functionality.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ### Single census with no sub-groups
      -## Petersen estimate -- the default
      -mr1 <- mrClosed(346,184,49)
      -summary(mr1)
      +    
      +

      Examples

      +
      ### Single census with no sub-groups
      +## Petersen estimate -- the default
      +mr1 <- mrClosed(346,184,49)
      +summary(mr1)
       #>         N
       #> [1,] 1299
      -summary(mr1,verbose=TRUE)
      +summary(mr1,verbose=TRUE)
       #> Used the 'naive' Petersen method with M=346, n=184, and m=49.
       #>         N
       #> [1,] 1299
      -summary(mr1,incl.SE=TRUE)
      +summary(mr1,incl.SE=TRUE)
       #>         N  SE
       #> [1,] 1299 159
      -summary(mr1,incl.SE=TRUE,digits=1)
      +summary(mr1,incl.SE=TRUE,digits=1)
       #>           N     SE
       #> [1,] 1299.3 158.99
      -confint(mr1)
      +confint(mr1)
       #>      95% LCI 95% UCI
       #> [1,]    1034    1666
      -confint(mr1,verbose=TRUE)
      +confint(mr1,verbose=TRUE)
       #> The binomial (wilson method) distribution was used.
       #>      95% LCI 95% UCI
       #> [1,]    1034    1666
      -confint(mr1,type="hypergeometric")
      +confint(mr1,type="hypergeometric")
       #>      95% LCI 95% UCI
       #> [1,]    1049    1670
      -
      -## Chapman modification of the Petersen estimate
      -mr2 <- mrClosed(346,184,49,method="Chapman")
      -summary(mr2,incl.SE=TRUE)
      +
      +## Chapman modification of the Petersen estimate
      +mr2 <- mrClosed(346,184,49,method="Chapman")
      +summary(mr2,incl.SE=TRUE)
       #>         N    SE
       #> [1,] 1283 142.1
      -summary(mr2,incl.SE=TRUE,verbose=TRUE)
      +summary(mr2,incl.SE=TRUE,verbose=TRUE)
       #> Used Chapman's modification of the Petersen method with M=346, n=184, and m=49.
       #>         N    SE
       #> [1,] 1283 142.1
      -
      -### Single census, using capHistSum() results
      -## data in capture history format
      -str(BluegillJL)
      +
      +### Single census, using capHistSum() results
      +## data in capture history format
      +str(BluegillJL)
       #> 'data.frame':	277 obs. of  2 variables:
       #>  $ first : int  1 0 1 0 1 1 1 1 1 1 ...
       #>  $ second: int  0 1 0 1 0 0 0 0 0 0 ...
      -ch1 <- capHistSum(BluegillJL)
      -mr3 <- mrClosed(ch1)
      -summary(mr3,verbose=TRUE)
      +ch1 <- capHistSum(BluegillJL)
      +mr3 <- mrClosed(ch1)
      +summary(mr3,verbose=TRUE)
       #> Used the 'naive' Petersen method with M=196, n=90, and m=9.
       #>         N
       #> [1,] 1960
      -confint(mr3,verbose=TRUE)
      +confint(mr3,verbose=TRUE)
       #> The Poisson (exact method) distribution was used.
       #>      95% LCI 95% UCI
       #> [1,]    1032    4286
      -
      -### Single census with sub-groups
      -marked <- c(93,35,72,16,46,20)
      -captured <- c(103,30,73,17,39,18)
      -recaps <- c(20,23,52,15,35,16)
      -lbls <- c("YOY","Juvenile","Stock","Quality","Preferred","Memorable")
      -mr4 <- mrClosed(marked,captured,recaps,method="Ricker",labels=lbls)
      -summary(mr4)
      +
      +### Single census with sub-groups
      +marked <- c(93,35,72,16,46,20)
      +captured <- c(103,30,73,17,39,18)
      +recaps <- c(20,23,52,15,35,16)
      +lbls <- c("YOY","Juvenile","Stock","Quality","Preferred","Memorable")
      +mr4 <- mrClosed(marked,captured,recaps,method="Ricker",labels=lbls)
      +summary(mr4)
       #>             N
       #> YOY       466
       #> Juvenile   46
      @@ -322,7 +384,7 @@ 

      Examples

      #> Preferred 52 #> Memorable 23 #> All 708 -summary(mr4,incl.SE=TRUE) +summary(mr4,incl.SE=TRUE) #> N SE #> YOY 466 88.7 #> Juvenile 46 4.4 @@ -331,7 +393,7 @@

      Examples

      #> Preferred 52 2.7 #> Memorable 23 1.8 #> All 708 89.2 -summary(mr4,incl.SE=TRUE,verbose=TRUE) +summary(mr4,incl.SE=TRUE,verbose=TRUE) #> Used Ricker's modification of the Petersen method with observed inputs (by group) of: #> YOY: M=93, n=103, and m=20 #> Juvenile: M=35, n=30, and m=23 @@ -347,7 +409,7 @@

      Examples

      #> Preferred 52 2.7 #> Memorable 23 1.8 #> All 708 89.2 -summary(mr4,incl.SE=TRUE,incl.all=FALSE,verbose=TRUE) +summary(mr4,incl.SE=TRUE,incl.all=FALSE,verbose=TRUE) #> Used Ricker's modification of the Petersen method with observed inputs (by group) of: #> YOY: M=93, n=103, and m=20 #> Juvenile: M=35, n=30, and m=23 @@ -362,7 +424,7 @@

      Examples

      #> Quality 19 1.5 #> Preferred 52 2.7 #> Memorable 23 1.8 -confint(mr4) +confint(mr4) #> 95% LCI 95% UCI #> YOY 325 691 #> Juvenile 41 60 @@ -371,7 +433,7 @@

      Examples

      #> Preferred 49 61 #> Memorable 22 31 #> All 533 883 -confint(mr4,verbose=TRUE) +confint(mr4,verbose=TRUE) #> YOY - The binomial (wilson method) distribution was used. #> Juvenile - The binomial (wilson method) distribution was used. #> Stock - The binomial (wilson method) distribution was used. @@ -387,7 +449,7 @@

      Examples

      #> Preferred 49 61 #> Memorable 22 31 #> All 533 883 -confint(mr4,incl.all=FALSE,verbose=TRUE) +confint(mr4,incl.all=FALSE,verbose=TRUE) #> YOY - The binomial (wilson method) distribution was used. #> Juvenile - The binomial (wilson method) distribution was used. #> Stock - The binomial (wilson method) distribution was used. @@ -401,74 +463,70 @@

      Examples

      #> Quality 18 25 #> Preferred 49 61 #> Memorable 22 31 - -### Multiple Census -## Data in summarized form ... Schnabel method -mr5 <- with(PikeNY,mrClosed(n=n,m=m,R=R,method="Schnabel")) -plot(mr5) -plot(mr5,loess=TRUE) + +### Multiple Census +## Data in summarized form ... Schnabel method +mr5 <- with(PikeNY,mrClosed(n=n,m=m,R=R,method="Schnabel")) +plot(mr5) +plot(mr5,loess=TRUE) -summary(mr5) +summary(mr5) #> N #> [1,] 87 -summary(mr5,verbose=TRUE) +summary(mr5,verbose=TRUE) #> Used the Schnabel method with Chapman modification. #> N #> [1,] 87 -confint(mr5) +confint(mr5) #> 95% LCI 95% UCI #> [1,] 71 113 -confint(mr5,verbose=TRUE) +confint(mr5,verbose=TRUE) #> The normal distribution was used. #> 95% LCI 95% UCI #> [1,] 71 113 - -## Schumacher-Eschmeyer method -mr6 <- with(PikeNY,mrClosed(n=n,m=m,R=R,method="Schumacher")) -summary(mr6) + +## Schumacher-Eschmeyer method +mr6 <- with(PikeNY,mrClosed(n=n,m=m,R=R,method="Schumacher")) +summary(mr6) #> N #> [1,] 85 -confint(mr6) +confint(mr6) #> 95% LCI 95% UCI #> [1,] 76 96 - -### Capture history data summarized by capHistSum() -# ignore first column of ID numbers -ch2 <- capHistSum(PikeNYPartial1,cols2ignore="id") - -## Schnabel method -mr7 <- mrClosed(ch2,method="Schnabel") -plot(mr7) + +### Capture history data summarized by capHistSum() +# ignore first column of ID numbers +ch2 <- capHistSum(PikeNYPartial1,cols2ignore="id") + +## Schnabel method +mr7 <- mrClosed(ch2,method="Schnabel") +plot(mr7) -summary(mr7) +summary(mr7) #> N #> [1,] 128 -confint(mr7) +confint(mr7) #> 95% LCI 95% UCI #> [1,] 75 238 - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/mrOpen.html b/docs/reference/mrOpen.html index 225f8b7c..3e55bd95 100644 --- a/docs/reference/mrOpen.html +++ b/docs/reference/mrOpen.html @@ -1,120 +1,144 @@ -Jolly-Seber analysis from multiple mark-recapture events from an open population. — jolly • FSAJolly-Seber analysis from multiple mark-recapture events from an open population. — jolly • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    This function takes the two parts of a Method B table and uses the Jolly-Seber method to estimate the population size at each possible sample period and the apparent survival rate and number of additional individuals added to the population between possible sample periods. This method assumes that the population is open.

    -
    -
    jolly(...)
    -
    -mrOpen(
    -  mb.top,
    -  mb.bot = NULL,
    -  type = c("Jolly", "Manly"),
    -  conf.level = 0.95,
    -  phi.full = TRUE
    -)
    -
    -# S3 method for mrOpen
    -summary(object, parm = c("N", "phi", "B", "M"), verbose = FALSE, ...)
    -
    -# S3 method for mrOpen
    -confint(
    -  object,
    -  parm = c("N", "phi", "B"),
    -  level = NULL,
    -  conf.level = NULL,
    -  verbose = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    jolly(...)
    +
    +mrOpen(
    +  mb.top,
    +  mb.bot = NULL,
    +  type = c("Jolly", "Manly"),
    +  conf.level = 0.95,
    +  phi.full = TRUE
    +)
    +
    +# S3 method for mrOpen
    +summary(object, parm = c("N", "phi", "B", "M"), verbose = FALSE, ...)
    +
    +# S3 method for mrOpen
    +confint(
    +  object,
    +  parm = c("N", "phi", "B"),
    +  level = NULL,
    +  conf.level = NULL,
    +  verbose = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    ...

    Additional arguments for methods.

    + +
    mb.top

    A matrix that contains the “top” of the Method B table (i.e., a contingency table of capture sample (columns) and last seen sample (rows)) or an object of class CapHist from capHistSum. See details.

    + +
    mb.bot

    A data frame that contains the “bottom” of the Method B table (i.e., the number of marked fish in the sample (m), the number of unmarked fish in the sample (u), the total number of fish in the sample (n), and the number of marked fish returned to the population following the sample (R)).

    + +
    type

    A string that indicates whether the large sample (normal theory) method of Jolly (type="Jolly") or the “arbitrary” method of Manly (type="Manly") should be used to construct confidence intervals.

    + +
    conf.level

    A single numeric that indicates the level of confidence to use for constructing confidence intervals (default is 0.95). See details.

    + +
    phi.full

    A logical that indicates whether the standard error for phi should include only sampling variability (phi.full=FALSE) or sampling and individual variability (phi.full=TRUE,default).

    + +
    object

    An object from mrOpen (i.e., of class mrOpen).

    + +
    parm

    A string that identifies the model parameters for which to return summaries or confidence intervals. By default, all parameters are returned.

    + +
    verbose

    A logical that indicates if the observables and other notes should be printed in summary and if the type of confidence interval used should be printed in confint. See details.

    + +
    level

    Same as conf.level but used for compatibility with generic confint function.

    +
    -
    -

    Value

    -

    A list with the following items:

    • df A data frame that contains observable summaries from the data and estimates of the number of extant marked fish (M), population size for each possible sample period (N), apparent survival rate between each possible pair of sample periods (phi), and the number of additional individuals added to the population between each possible pair of sample periods (B). In addition to the estimates, values of the standard errors and the lower and upper confidence interval bounds for each parameter are provided (however, see the details above).

    • +
      +

      Value

      + + +

      A list with the following items:

      +

      +
      • df A data frame that contains observable summaries from the data and estimates of the number of extant marked fish (M), population size for each possible sample period (N), apparent survival rate between each possible pair of sample periods (phi), and the number of additional individuals added to the population between each possible pair of sample periods (B). In addition to the estimates, values of the standard errors and the lower and upper confidence interval bounds for each parameter are provided (however, see the details above).

      • type The provided type of confidence intervals that was used.

      • phi.full The provided logical that indicates the type of standard error for phi that was used.

      • conf.level The provided level of confidence that was used.

      -
      -

      Details

      +
      +

      Details

      jolly is just a convenience wrapper that produces the exact same results as mrOpen.

      If mb.top contains an object from the capHistSum function then mb.bot can be left missing. In this case, the function will extract the needed data from the methodB.top and methodB.bot portions of the CapHist class object.

      If mb.top is a matrix then it must be square, must have non-negative and no NA values in the upper triangle, and all NA values on the lower triangle and diagonal. If mb.bot is a matrix then it must have four rows named m, u, n, and R (see capHistSum for definitions), all values must be non-NA, and the first value of m must be 0. The last value of R can either be 0 or some positive number (it is ultimately ignored in all calculations).

      @@ -122,21 +146,21 @@

      Details

      The summary function returns estimates of M, N, phi, B, and their associated standard errors and, if verbose=TRUE the intermediate calculations of “observables” from the data -- n, m, R, r, and z.

      The level of confidence is not set in the confint function, in contrast to most confint functions. Rather the confidence level is set in the main mrOpen function.

      -
      -

      Testing

      +
      +

      Testing

      The formulas have been triple-checked against formulas in Pollock et al. (1990), Manly (1984), and Seber (2002).

      The results for the CutthroatAL data file (as analyzed in the example) were compared to results from the JOLLY program available at http://www.mbr-pwrc.usgs.gov/software/jolly.html. The r and z values matched, all M and N estimates match at one decimal place, all phi are within 0.001, and all B are within 0.7. The SE match for M except for two estimates that are within 0.1, match for N except for one estimate that is within 0.1, are within 0.001 for phi, and are within 1.3 for B (except for the first estimate which is dramatically off).

      The results of mrOpen related to Table 4.4 of Pollock et al. (1990) match (to one decimal place) except for three estimates that are within 0.1% for N, match (to two decimal places) for phi except where Pollock set phi>1 to phi=1, match for B except where Pollock set B<0 to B=0. The SE match (to two decimal places) for N except for N15 (which is within 0.5, <5%), match (to three decimal places) for phi except for phi15 (which is within 0.001, <0.5%), match (to two decimal places) for B except for B17 and B20 which are within 0.2 (<0.2%).

      All point estimates of M, N, phi, and B and the SE of phi match the results in Table 2.3 of Krebs (1989) (within minimal rounding error for a very small number of results). The SE of N results are not close to those of Krebs (1989) (who does not provide a formula for SE so the discrepancy cannot be explored). The SE of B results match those of Krebs (1989) for 5 of the 8 values and are within 5% for 2 of the other 3 values (the last estimate is off by 27%).

      For comparing to Jolly's data as presented in Tables 5.1 and 5.2 of Seber (2002), M was within 4 (less than 1.5%), N was within 3% (except N2 which was within 9%), phi was within 0.01 (less than 1.5%).

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      9-Abundance from Capture-Recapture Data and 11-Mortality.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Jolly, G.M. 1965. Explicit estimates from capture-recapture data with both death and immigration -- stochastic model. Biometrika, 52:225-247.

      Krebs, C.J. 1989. Ecological Methodology. Harper & Row Publishers, New York.

      Leslie, P.H. and D. Chitty. 1951. The estimation of population parameters from data obtained by means of the capture-recapture method. I. The maximum likelihood equations for estimating the death-rate. Biometrika, 38:269-292.

      @@ -145,21 +169,21 @@

      References

      Seber, G.A.F. 1965. A note on the multiple recapture census. Biometrika 52:249-259.

      Seber, G.A.F. 2002. The Estimation of Animal Abundance. Edward Arnold, second edition (reprinted).

      -
      -

      See also

      +
      +

      See also

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      -
      -

      Examples

      -
      ## First example -- capture histories summarized with capHistSum()
      -ch1 <- capHistSum(CutthroatAL,cols2use=-1)  # ignore first column of fish ID
      -ex1 <- mrOpen(ch1)
      -summary(ex1)
      +    
      +

      Examples

      +
      ## First example -- capture histories summarized with capHistSum()
      +ch1 <- capHistSum(CutthroatAL,cols2use=-1)  # ignore first column of fish ID
      +ex1 <- mrOpen(ch1)
      +summary(ex1)
       #>         M M.se     N  N.se   phi phi.se     B  B.se
       #> i=1    NA   NA    NA    NA 0.411  0.088    NA    NA
       #> i=2  36.6  6.4 561.1 117.9 0.349  0.045 198.6  48.2
      @@ -170,7 +194,7 @@ 

      Examples

      #> i=7 175.1 24.6 553.7 84.3 0.268 0.072 106.9 36.2 #> i=8 100.2 24.7 255.3 65.4 NA NA NA NA #> i=9 NA NA NA NA NA NA NA NA -summary(ex1,verbose=TRUE) +summary(ex1,verbose=TRUE) #> Observables: #> m n R r z #> i=1 0 89 89 26 NA @@ -194,7 +218,7 @@

      Examples

      #> i=7 175.1 24.6 553.7 84.3 0.268 0.072 106.9 36.2 #> i=8 100.2 24.7 255.3 65.4 NA NA NA NA #> i=9 NA NA NA NA NA NA NA NA -summary(ex1,parm="N") +summary(ex1,parm="N") #> N N.se #> i=1 NA NA #> i=2 561.1 117.9 @@ -205,7 +229,7 @@

      Examples

      #> i=7 553.7 84.3 #> i=8 255.3 65.4 #> i=9 NA NA -summary(ex1,parm=c("N","phi")) +summary(ex1,parm=c("N","phi")) #> N N.se phi phi.se #> i=1 NA NA 0.411 0.088 #> i=2 561.1 117.9 0.349 0.045 @@ -216,7 +240,7 @@

      Examples

      #> i=7 553.7 84.3 0.268 0.072 #> i=8 255.3 65.4 NA NA #> i=9 NA NA NA NA -confint(ex1) +confint(ex1) #> N.lci N.uci phi.lci phi.uci B.lci B.uci #> i=1 NA NA 0.237 0.584 NA NA #> i=2 330.0 792.1 0.261 0.436 104.0 293.1 @@ -227,7 +251,7 @@

      Examples

      #> i=7 388.4 719.1 0.127 0.409 36.0 177.8 #> i=8 127.2 383.4 NA NA NA NA #> i=9 NA NA NA NA NA NA -confint(ex1,parm="N") +confint(ex1,parm="N") #> N.lci N.uci #> i=1 NA NA #> i=2 330.0 792.1 @@ -238,7 +262,7 @@

      Examples

      #> i=7 388.4 719.1 #> i=8 127.2 383.4 #> i=9 NA NA -confint(ex1,parm=c("N","phi")) +confint(ex1,parm=c("N","phi")) #> N.lci N.uci phi.lci phi.uci #> i=1 NA NA 0.237 0.584 #> i=2 330.0 792.1 0.261 0.436 @@ -249,7 +273,7 @@

      Examples

      #> i=7 388.4 719.1 0.127 0.409 #> i=8 127.2 383.4 NA NA #> i=9 NA NA NA NA -confint(ex1,verbose=TRUE) +confint(ex1,verbose=TRUE) #> The Jolly method was used to construct confidence intervals. #> N.lci N.uci phi.lci phi.uci B.lci B.uci #> i=1 NA NA 0.237 0.584 NA NA @@ -261,31 +285,31 @@

      Examples

      #> i=7 388.4 719.1 0.127 0.409 36.0 177.8 #> i=8 127.2 383.4 NA NA NA NA #> i=9 NA NA NA NA NA NA - -## Second example - Jolly's data -- summarized data entered "by hand" -s1 <- rep(NA,13) -s2 <- c(10,rep(NA,12)) -s3 <- c(3,34,rep(NA,11)) -s4 <- c(5,18,33,rep(NA,10)) -s5 <- c(2,8,13,30,rep(NA,9)) -s6 <- c(2,4,8,20,43,rep(NA,8)) -s7 <- c(1,6,5,10,34,56,rep(NA,7)) -s8 <- c(0,4,0,3,14,19,46,rep(NA,6)) -s9 <- c(0,2,4,2,11,12,28,51,rep(NA,5)) -s10 <- c(0,0,1,2,3,5,17,22,34,rep(NA,4)) -s11 <- c(1,2,3,1,0,4,8,12,16,30,rep(NA,3)) -s12 <- c(0,1,3,1,1,2,7,4,11,16,26,NA,NA) -s13 <- c(0,1,0,2,3,3,2,10,9,12,18,35,NA) -jolly.top <- cbind(s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12,s13) - -n <- c(54,146,169,209,220,209,250,176,172,127,123,120,142) -R <- c(54,143,164,202,214,207,243,175,169,126,120,120,0) -m <- c(0,10,37,56,53,77,112,86,110,84,77,72,95) -u <- n-m -jolly.bot <- rbind(m,u,n,R) - -ex2 <- mrOpen(jolly.top,jolly.bot) -summary(ex2,verbose=TRUE) + +## Second example - Jolly's data -- summarized data entered "by hand" +s1 <- rep(NA,13) +s2 <- c(10,rep(NA,12)) +s3 <- c(3,34,rep(NA,11)) +s4 <- c(5,18,33,rep(NA,10)) +s5 <- c(2,8,13,30,rep(NA,9)) +s6 <- c(2,4,8,20,43,rep(NA,8)) +s7 <- c(1,6,5,10,34,56,rep(NA,7)) +s8 <- c(0,4,0,3,14,19,46,rep(NA,6)) +s9 <- c(0,2,4,2,11,12,28,51,rep(NA,5)) +s10 <- c(0,0,1,2,3,5,17,22,34,rep(NA,4)) +s11 <- c(1,2,3,1,0,4,8,12,16,30,rep(NA,3)) +s12 <- c(0,1,3,1,1,2,7,4,11,16,26,NA,NA) +s13 <- c(0,1,0,2,3,3,2,10,9,12,18,35,NA) +jolly.top <- cbind(s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12,s13) + +n <- c(54,146,169,209,220,209,250,176,172,127,123,120,142) +R <- c(54,143,164,202,214,207,243,175,169,126,120,120,0) +m <- c(0,10,37,56,53,77,112,86,110,84,77,72,95) +u <- n-m +jolly.bot <- rbind(m,u,n,R) + +ex2 <- mrOpen(jolly.top,jolly.bot) +summary(ex2,verbose=TRUE) #> Observables: #> m n R r z #> 1 0 54 54 24 NA @@ -317,7 +341,7 @@

      Examples

      #> 11 313.6 34.9 498.6 63.0 0.767 0.128 73.3 39.0 #> 12 273.7 36.2 453.6 66.6 NA NA NA NA #> 13 NA NA NA NA NA NA NA NA -confint(ex2,verbose=TRUE) +confint(ex2,verbose=TRUE) #> The Jolly method was used to construct confidence intervals. #> N.lci N.uci phi.lci phi.uci B.lci B.uci #> 1 NA NA 0.430 0.862 NA NA @@ -333,9 +357,9 @@

      Examples

      #> 11 375.1 622.0 0.516 1.019 -3.2 149.8 #> 12 323.1 584.1 NA NA NA NA #> 13 NA NA NA NA NA NA - -ex3 <- mrOpen(jolly.top,jolly.bot,type="Manly") -summary(ex3,verbose=TRUE) + +ex3 <- mrOpen(jolly.top,jolly.bot,type="Manly") +summary(ex3,verbose=TRUE) #> Observables: #> m n R r z #> 1 0 54 54 24 NA @@ -367,7 +391,7 @@

      Examples

      #> 11 313.6 498.6 0.767 73.3 #> 12 273.7 453.6 NA NA #> 13 NA NA NA NA -confint(ex3,verbose=TRUE) +confint(ex3,verbose=TRUE) #> Manly did not provide a method for constructing confidence intervals for B. #> The Manly method was used to construct confidence intervals. #> N.lci N.uci phi.lci phi.uci @@ -384,32 +408,28 @@

      Examples

      #> 11 384.5 751.9 0.535 1.034 #> 12 342.0 712.0 NA NA #> 13 NA NA NA NA - -## demonstrate use of jolly() -ex3a <- jolly(jolly.top,jolly.bot) - + +## demonstrate use of jolly() +ex3a <- jolly(jolly.top,jolly.bot) +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/nlsBoot.html b/docs/reference/nlsBoot.html index e03c5257..6fcd1f2f 100644 --- a/docs/reference/nlsBoot.html +++ b/docs/reference/nlsBoot.html @@ -1,197 +1,227 @@ -Associated S3 methods for nlsBoot from nlstools. — nlsBoot • FSAAssociated S3 methods for nlsBoot from nlstools. — nlsBoot • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Provides S3 methods to construct non-parametric bootstrap confidence intervals and hypothesis tests for parameter values and predicted values of the response variable for a nlsBoot object from the nlstools package.

    -
    -
    # S3 method for nlsBoot
    -confint(
    -  object,
    -  parm = NULL,
    -  level = conf.level,
    -  conf.level = 0.95,
    -  plot = FALSE,
    -  err.col = "black",
    -  err.lwd = 2,
    -  rows = NULL,
    -  cols = NULL,
    -  ...
    -)
    -
    -# S3 method for nlsBoot
    -predict(object, FUN, conf.level = 0.95, digits = NULL, ...)
    -
    -htest(object, ...)
    -
    -# S3 method for nlsBoot
    -htest(
    -  object,
    -  parm = NULL,
    -  bo = 0,
    -  alt = c("two.sided", "less", "greater"),
    -  plot = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    # S3 method for nlsBoot
    +confint(
    +  object,
    +  parm = NULL,
    +  level = conf.level,
    +  conf.level = 0.95,
    +  plot = FALSE,
    +  err.col = "black",
    +  err.lwd = 2,
    +  rows = NULL,
    +  cols = NULL,
    +  ...
    +)
    +
    +# S3 method for nlsBoot
    +predict(object, FUN, conf.level = 0.95, digits = NULL, ...)
    +
    +htest(object, ...)
    +
    +# S3 method for nlsBoot
    +htest(
    +  object,
    +  parm = NULL,
    +  bo = 0,
    +  alt = c("two.sided", "less", "greater"),
    +  plot = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    An object saved from nlsBoot().

    + +
    parm

    An integer that indicates which parameter to compute the confidence interval or hypothesis test for. The confidence interval will be computed for all parameters if NULL.

    + +
    level

    Same as conf.level. Used for compatibility with the main confint.

    + +
    conf.level

    A level of confidence as a proportion.

    + +
    plot

    A logical that indicates whether a plot should be constructed. If confint, then a histogram of the parm parameters from the bootstrap samples with error bars that illustrate the bootstrapped confidence intervals will be constructed. If htest, then a histogram of the parm parameters with a vertical line illustrating the bo value will be constructed.

    + +
    err.col

    A single numeric or character that identifies the color for the error bars on the plot.

    + +
    err.lwd

    A single numeric that identifies the line width for the error bars on the plot.

    + +
    rows

    A numeric that contains the number of rows to use on the graphic.

    + +
    cols

    A numeric that contains the number of columns to use on the graphic.

    + +
    ...

    Additional arguments to functions.

    + +
    FUN

    The function to be applied for the prediction. See the examples.

    + +
    digits

    A single numeric that indicates the number of digits for the result.

    + +
    bo

    The null hypothesized parameter value.

    + +
    alt

    A string that identifies the “direction” of the alternative hypothesis. See details.

    +
    -
    -

    Value

    -

    confint returns a matrix with as many rows as columns (i.e., parameter estimates) in the object$coefboot data frame and two columns of the quantiles that correspond to the approximate confidence interval. -htest returns a matrix with two columns. The first column contains the hypothesized value sent to this function and the second column is the corresponding p-value. -predict returns a matrix with one row and three columns, with the first column holding the predicted value (i.e., the median prediction) and the last two columns holding the approximate confidence interval.

    +
    +

    Value

    + + +

    confint returns a matrix with as many rows as columns (i.e., parameter estimates) in the object$coefboot data frame and two columns of the quantiles that correspond to the approximate confidence interval.

    + + +

    htest returns a matrix with two columns. The first column contains the hypothesized value sent to this function and the second column is the corresponding p-value.

    + + +

    predict returns a matrix with one row and three columns, with the first column holding the predicted value (i.e., the median prediction) and the last two columns holding the approximate confidence interval.

    -
    -

    Details

    +
    +

    Details

    confint finds the two quantiles that have the proportion (1-conf.level)/2 of the bootstrapped parameter estimates below and above. This is an approximate 100conf.level% confidence interval.

    In htest the “direction” of the alternative hypothesis is identified by a string in the alt= argument. The strings may be "less" for a “less than” alternative, "greater" for a “greater than” alternative, or "two.sided" for a “not equals” alternative (the DEFAULT). In the one-tailed alternatives the p-value is the proportion of bootstrapped parameter estimates in object$coefboot that are extreme of the null hypothesized parameter value in bo. In the two-tailed alternative the p-value is twice the smallest of the proportion of bootstrapped parameter estimates above or below the null hypothesized parameter value in bo.

    In predict, a user-supplied function is applied to each row of the coefBoot object in a nlsBoot object and then finds the median and the two quantiles that have the proportion (1-conf.level)/2 of the bootstrapped predictions below and above. The median is returned as the predicted value and the quantiles are returned as an approximate 100conf.level% confidence interval for that prediction.

    -
    -

    See also

    +
    +

    See also

    Boot and related methods in car and summary.nlsBoot in nlstools.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    fnx <- function(days,B1,B2,B3) {
    -  if (length(B1) > 1) {
    -    B2 <- B1[2]
    -    B3 <- B1[3]
    -    B1 <- B1[1]
    -  }
    -  B1/(1+exp(B2+B3*days))
    -}
    -nl1 <- nls(cells~fnx(days,B1,B2,B3),data=Ecoli,
    -           start=list(B1=6,B2=7.2,B3=-1.45))
    -if (require(nlstools)) {
    -  nl1.bootn <-  nlstools::nlsBoot(nl1,niter=99) # too few to be useful
    -  confint(nl1.bootn,"B1")
    -  confint(nl1.bootn,c(2,3))
    -  confint(nl1.bootn,conf.level=0.90)
    -  confint(nl1.bootn,plot=TRUE)
    -  predict(nl1.bootn,fnx,days=3)
    -  predict(nl1.bootn,fnx,days=1:3)
    -  htest(nl1.bootn,1,bo=6,alt="less")
    -}
    +    
    +

    Examples

    +
    fnx <- function(days,B1,B2,B3) {
    +  if (length(B1) > 1) {
    +    B2 <- B1[2]
    +    B3 <- B1[3]
    +    B1 <- B1[1]
    +  }
    +  B1/(1+exp(B2+B3*days))
    +}
    +nl1 <- nls(cells~fnx(days,B1,B2,B3),data=Ecoli,
    +           start=list(B1=6,B2=7.2,B3=-1.45))
    +if (require(nlstools)) {
    +  nl1.bootn <-  nlstools::nlsBoot(nl1,niter=99) # too few to be useful
    +  confint(nl1.bootn,"B1")
    +  confint(nl1.bootn,c(2,3))
    +  confint(nl1.bootn,conf.level=0.90)
    +  confint(nl1.bootn,plot=TRUE)
    +  predict(nl1.bootn,fnx,days=3)
    +  predict(nl1.bootn,fnx,days=1:3)
    +  htest(nl1.bootn,1,bo=6,alt="less")
    +}
     #> Loading required package: nlstools
     #> 
     #> 'nlstools' has been loaded.
     #> IMPORTANT NOTICE: Most nonlinear regression models and data set examples
     #> related to predictive microbiolgy have been moved to the package 'nlsMicrobio'
     #> Error in nlstools::nlsBoot(nl1, niter = 99): Procedure aborted: the fit only converged in 1 % during bootstrapping
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/nlsTracePlot.html b/docs/reference/nlsTracePlot.html index e5526314..1d8801f6 100644 --- a/docs/reference/nlsTracePlot.html +++ b/docs/reference/nlsTracePlot.html @@ -1,190 +1,210 @@ -Adds model fits from nls iterations to active plot. — nlsTracePlot • FSAAdds model fits from nls iterations to active plot. — nlsTracePlot • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Adds model fits from iterations of the nls algorithm as returned when trace=TRUE. Useful for diagnosing model fitting problems or issues associated with starting values.

    -
    -
    nlsTracePlot(
    -  object,
    -  fun,
    -  from = NULL,
    -  to = NULL,
    -  n = 199,
    -  lwd = 2,
    -  col = NULL,
    -  rev.col = FALSE,
    -  legend = "topright",
    -  cex.leg = 0.9,
    -  box.lty.leg = 0,
    -  add = TRUE
    -)
    +
    +

    Usage

    +
    nlsTracePlot(
    +  object,
    +  fun,
    +  from = NULL,
    +  to = NULL,
    +  n = 199,
    +  lwd = 2,
    +  col = NULL,
    +  rev.col = FALSE,
    +  legend = "topright",
    +  cex.leg = 0.9,
    +  box.lty.leg = 0,
    +  add = TRUE
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    An object saved from nls or from capture.output using try with nls. See details.

    + +
    fun

    A function that represents the model being fit in nls. This must take the x-axis variable as the first argument and model parameters as a vector in the second argument. See details.

    + +
    from, to

    The range over which the function will be plotted. Defaults to range of the x-axis of the active plot.

    + +
    n

    The number of value at which to evaluate the function for plotting (i.e., the number of values from from to to). Larger values make smoother lines.

    + +
    lwd

    A numeric used to indicate the line width of the fitted line.

    + +
    col

    A single character string that is a palette from hcl.pals or a vector of character strings containing colors for the fitted lines at each trace.

    + +
    rev.col

    A logical that indicates that the order of colors for plotting the lines should be reversed.

    + +
    legend

    Controls use and placement of the legend. See details.

    + +
    cex.leg

    A single numeric value that represents the character expansion value for the legend. Ignored if legend=FALSE.

    + +
    box.lty.leg

    A single numeric values that indicates the type of line to use for the box around the legend. The default is to not plot a box.

    + +
    add

    A logical indicating whether the lines should be added to the existing plot (defaults to =TRUE).

    +
    -
    -

    Value

    -

    A matrix with the residual sum-of-squares in the first column and parameter estimates in the remaining columns for each iteration (rows) of nls as provided when trace=TRUE.

    +
    +

    Value

    + + +

    A matrix with the residual sum-of-squares in the first column and parameter estimates in the remaining columns for each iteration (rows) of nls as provided when trace=TRUE.

    -
    -

    Details

    +
    +

    Details

    Nonlinear models fit with the nls function start with starting values for model parameters and iteratively search for other model parameters that continuously reduce the residual sum-of-squares (RSS) until some pre-determined criterion suggest that the RSS cannot be (substantially) further reduced. With good starting values and well-behaved data, the minimum RSS may be found in a few (<10) iterations. However, poor starting values or poorly behaved data may lead to a prolonged and possibly failed search. An understanding of the iterations in a prolonged or failed search may help identify the failure and lead to choices that may result in a successful search. The trace=TRUE argument of nls allows one to see the values at each iterative step. The function documented here plots the “trace” results at each iteration on a previously existing plot of the data. This creates a visual of the iterative process.

    The object argument may be an object saved from a successful run of nls. See the examples with SpotVA1 and CodNorwegion.

    However, if nls fails to converge to a solution then no useful object is returned. In this case, trace=TRUE must be added to the failed nls call. The call is then wrapped in try to work-around the failed convergence error. This is also wrapped in capture.output to capture the “trace” results. This is then saved to an object that which can then be the object of the function documented here. This process is illustrated with the example using BSkateGB.

    The function in fun is used to make predictions given the model parameter values at each step of the iteration. This function must accept the explanatory/independent variable as its first argument and values for all model parameters in a vector as its second argument. These types of functions are returned by vbFuns, GompertzFuns, logisticFuns, and RichardsFuns for common growth models and srFuns for common stock-recruitment models. See the examples.

    -
    -

    Note

    +
    +

    Note

    The position of the “legend” can be controlled in three ways. First, if legend=TRUE, then the R console is suspended until the user places the legend on the plot by clicking on the point where the upper-left corner of the legend should appear. Second, legend= can be set to one of "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" and "center". In this case, the legend will be placed inside the plot frame at the given location. Finally, legend= can be set to a vector of length two which identifies the plot coordinates for the upper-left corner of where the legend should be placed. A legend will not be drawn if legend=FALSE or legend=NULL.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Examples following a successful fit
    -vb1 <- vbFuns()
    -fit1 <- nls(tl~vb1(age,Linf,K,t0),data=SpotVA1,start=list(Linf=12,K=0.3,t0=0))
    -plot(tl~age,data=SpotVA1,pch=21,bg="gray40")
    -nlsTracePlot(fit1,vb1,legend="bottomright")
    +    
    +

    Examples

    +
    ## Examples following a successful fit
    +vb1 <- vbFuns()
    +fit1 <- nls(tl~vb1(age,Linf,K,t0),data=SpotVA1,start=list(Linf=12,K=0.3,t0=0))
    +plot(tl~age,data=SpotVA1,pch=21,bg="gray40")
    +nlsTracePlot(fit1,vb1,legend="bottomright")
     
    -
    -r1 <- srFuns("Ricker")
    -fitSR1 <- nls(log(recruits)~log(r1(stock,a,b)),data=CodNorwegian,start=list(a=3,b=0.03))
    -plot(recruits~stock,data=CodNorwegian,pch=21,bg="gray40",xlim=c(0,200))
    -nlsTracePlot(fitSR1,r1)
    +
    +r1 <- srFuns("Ricker")
    +fitSR1 <- nls(log(recruits)~log(r1(stock,a,b)),data=CodNorwegian,start=list(a=3,b=0.03))
    +plot(recruits~stock,data=CodNorwegian,pch=21,bg="gray40",xlim=c(0,200))
    +nlsTracePlot(fitSR1,r1)
     
    -
    -# no plot, but returns trace results as a matrix
    -( tmp <- nlsTracePlot(fitSR1,r1,add=FALSE) )
    +
    +# no plot, but returns trace results as a matrix
    +( tmp <- nlsTracePlot(fitSR1,r1,add=FALSE) )
     #>          [,1]       [,2]
     #> [1,] 3.000000 0.03000000
     #> [2,] 5.038795 0.01836117
     #> [3,] 5.850261 0.01836117
     #> [4,] 5.918852 0.01836117
     #> [5,] 5.919256 0.01836117
    -
    -if (FALSE) {
    -if (require(FSAdata)) {
    -  data(BSkateGB,package="FSAdata")
    -  wtr <- droplevels(subset(BSkateGB,season=="winter"))
    -  bh1 <- srFuns()
    -  trc <- capture.output(try(
    -  fitSR1 <- nls(recruits~bh1(spawners,a,b),wtr,
    -                start=srStarts(recruits~spawners,data=wtr),trace=TRUE)
    -  ))
    -  plot(recruits~spawners,data=wtr,pch=21,bg="gray40")
    -  nlsTracePlot(trc,bh1)
    -  # zoom in on y-axis
    -  plot(recruits~spawners,data=wtr,pch=21,bg="gray40",ylim=c(0.02,0.05))
    -  nlsTracePlot(trc,bh1,legend="top")
    -  # return just the trace results
    -  ( tmp <- nlsTracePlot(trc,bh1,add=FALSE) )
    -}
    -}
    -
    +
    +if (FALSE) {
    +if (require(FSAdata)) {
    +  data(BSkateGB,package="FSAdata")
    +  wtr <- droplevels(subset(BSkateGB,season=="winter"))
    +  bh1 <- srFuns()
    +  trc <- capture.output(try(
    +  fitSR1 <- nls(recruits~bh1(spawners,a,b),wtr,
    +                start=srStarts(recruits~spawners,data=wtr),trace=TRUE)
    +  ))
    +  plot(recruits~spawners,data=wtr,pch=21,bg="gray40")
    +  nlsTracePlot(trc,bh1)
    +  # zoom in on y-axis
    +  plot(recruits~spawners,data=wtr,pch=21,bg="gray40",ylim=c(0.02,0.05))
    +  nlsTracePlot(trc,bh1,legend="top")
    +  # return just the trace results
    +  ( tmp <- nlsTracePlot(trc,bh1,add=FALSE) )
    +}
    +}
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/perc.html b/docs/reference/perc.html index 03a7ce3b..d3e36794 100644 --- a/docs/reference/perc.html +++ b/docs/reference/perc.html @@ -1,147 +1,155 @@ -Computes the percentage of values in a vector less than or greater than (and equal to) some value. — perc • FSAComputes the percentage of values in a vector less than or greater than (and equal to) some value. — perc • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the percentage of values in a vector less than or greater than (and equal to) a user-supplied value.

    -
    -
    perc(
    -  x,
    -  val,
    -  dir = c("geq", "gt", "leq", "lt"),
    -  na.rm = TRUE,
    -  digits = getOption("digits")
    -)
    +
    +

    Usage

    +
    perc(
    +  x,
    +  val,
    +  dir = c("geq", "gt", "leq", "lt"),
    +  na.rm = TRUE,
    +  digits = getOption("digits")
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector.

    + +
    val

    A single numeric value.

    + +
    dir

    A string that indicates whether the percentage is for values in x that are “greater than and equal” "geq", “greater than” "gt", “less than and equal” "leq", “less than” "lt" the value in val.

    + +
    na.rm

    A logical that indicates whether NA values should be removed (DEFAULT) from x or not.

    + +
    digits

    A single numeric that indicates the number of decimals the percentage should be rounded to.

    +
    -
    -

    Value

    -

    A single numeric that is the percentage of values in x that meet the criterion in dir relative to val.

    +
    +

    Value

    + + +

    A single numeric that is the percentage of values in x that meet the criterion in dir relative to val.

    -
    -

    Details

    +
    +

    Details

    This function is most useful when used with an apply-type of function.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## vector of values
    -( tmp <- c(1:8,NA,NA) )
    +    
    +

    Examples

    +
    ## vector of values
    +( tmp <- c(1:8,NA,NA) )
     #>  [1]  1  2  3  4  5  6  7  8 NA NA
    -
    -## percentages excluding NA values
    -perc(tmp,5)
    +
    +## percentages excluding NA values
    +perc(tmp,5)
     #> [1] 50
    -perc(tmp,5,"gt")
    +perc(tmp,5,"gt")
     #> [1] 37.5
    -perc(tmp,5,"leq")
    +perc(tmp,5,"leq")
     #> [1] 62.5
    -perc(tmp,5,"lt")
    +perc(tmp,5,"lt")
     #> [1] 50
    -
    -## percentages including NA values
    -perc(tmp,5,na.rm=FALSE)
    +
    +## percentages including NA values
    +perc(tmp,5,na.rm=FALSE)
     #> [1] 40
    -perc(tmp,5,"gt",na.rm=FALSE)
    +perc(tmp,5,"gt",na.rm=FALSE)
     #> [1] 30
    -perc(tmp,5,"leq",na.rm=FALSE)
    +perc(tmp,5,"leq",na.rm=FALSE)
     #> [1] 50
    -perc(tmp,5,"lt",na.rm=FALSE)
    +perc(tmp,5,"lt",na.rm=FALSE)
     #> [1] 40
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/plotAB.html b/docs/reference/plotAB.html index cfb9a3c7..8cf34b07 100644 --- a/docs/reference/plotAB.html +++ b/docs/reference/plotAB.html @@ -1,213 +1,257 @@ -Construct traditional (Campana-like) age-bias plots. — plotAB • FSAConstruct traditional (Campana-like) age-bias plots. — plotAB • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Constructs a traditional (e.g., like that described in Campana et al. (1995)) age-bias plot to visualize potential differences in paired age estimates. Ages may be from, for example, two readers of the same structure, one reader at two times, two structures (e.g., scales, spines, otoliths), or one structure and known ages.

    -
    -
    plotAB(
    -  x,
    -  what = c("bias", "Campana", "numbers"),
    -  xlab = x$ref.lab,
    -  ylab = x$nref.lab,
    -  xlim = NULL,
    -  ylim = NULL,
    -  yaxt = graphics::par("yaxt"),
    -  xaxt = graphics::par("xaxt"),
    -  col.agree = "gray60",
    -  lwd.agree = lwd,
    -  lty.agree = 2,
    -  lwd = 1,
    -  sfrac = 0,
    -  pch.mean = 19,
    -  pch.mean.sig = 21,
    -  cex.mean = lwd,
    -  col.CI = "black",
    -  col.CIsig = "red",
    -  lwd.CI = lwd,
    -  sfrac.CI = sfrac,
    -  show.n = FALSE,
    -  nYpos = 1.03,
    -  cex.n = 0.75,
    -  cex.numbers = 0.75,
    -  col.numbers = "black",
    -  ...
    -)
    +
    +

    Usage

    +
    plotAB(
    +  x,
    +  what = c("bias", "Campana", "numbers"),
    +  xlab = x$ref.lab,
    +  ylab = x$nref.lab,
    +  xlim = NULL,
    +  ylim = NULL,
    +  yaxt = graphics::par("yaxt"),
    +  xaxt = graphics::par("xaxt"),
    +  col.agree = "gray60",
    +  lwd.agree = lwd,
    +  lty.agree = 2,
    +  lwd = 1,
    +  sfrac = 0,
    +  pch.mean = 19,
    +  pch.mean.sig = 21,
    +  cex.mean = lwd,
    +  col.CI = "black",
    +  col.CIsig = "red",
    +  lwd.CI = lwd,
    +  sfrac.CI = sfrac,
    +  show.n = FALSE,
    +  nYpos = 1.03,
    +  cex.n = 0.75,
    +  cex.numbers = 0.75,
    +  col.numbers = "black",
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    An object of class ageBias, usually a result from ageBias.

    + +
    what

    A string that indicates what type of plot to construct. See details.

    + +
    xlab, ylab

    A string label for the x-axis (reference) or y-axis (non-reference) age estimates, respectively.

    + +
    xlim, ylim

    A numeric vector of length 2 that contains the limits of the x-axis (reference ages) or y-axis (non-reference ages), respectively.

    + +
    xaxt, yaxt

    A string which specifies the x- and y-axis types. Specifying “n” suppresses plotting of the axis. See ?par.

    + +
    col.agree

    A string or numeric for the color of the 1:1 or zero (if difference=TRUE) reference line.

    + +
    lwd.agree

    A numeric for the line width of the 1:1 or zero (if difference=TRUE) reference line.

    + +
    lty.agree

    A numeric for the line type of the 1:1 or zero (if difference=TRUE) reference line.

    + +
    lwd

    A numeric that controls the separate ‘lwd’ argument (e.g., lwd.CI and lwd.range).

    + +
    sfrac

    A numeric that controls the separate ‘sfrac’ arguments (e.g., sfrac.CI and sfrac.range). See sfrac in plotCI of plotrix.

    + +
    pch.mean

    A numeric for the plotting character used for the mean values when the means are considered insignificant.

    + +
    pch.mean.sig

    A numeric for the plotting character for the mean values when the means are considered significant.

    + +
    cex.mean

    A character expansion value for the size of the mean symbol in pch.mean and pch.mean.sig.

    + +
    col.CI

    A string or numeric for the color of confidence interval bars that are considered non-significant.

    + +
    col.CIsig

    A string or numeric for the color of confidence interval bars that are considered significant.

    + +
    lwd.CI

    A numeric for the line width of the confidence interval bars.

    + +
    sfrac.CI

    A numeric for the size of the ends of the confidence interval bars. See sfrac in plotCI of plotrix.

    + +
    show.n

    A logical for whether the sample sizes for each level of the x-axis variable is shown (=TRUE, default) or not (=FALSE).

    + +
    nYpos

    A numeric for the relative Y position of the sample size values when show.n=TRUE. For example, if nYpos=1.03 then the sample size values will be centered at 3 percent above the top end of the y-axis.

    + +
    cex.n

    A character expansion value for the size of the sample size values.

    + +
    cex.numbers

    A character expansion value for the size of the numbers plotted when what="numbers" is used.

    + +
    col.numbers

    A string for the color of the numbers plotted when what="numbers" is used.

    + +
    ...

    Additional arguments for methods.

    +
    -
    -

    Value

    -

    Nothing, but see details for a description of the plot that is produced.

    +
    +

    Value

    + + +

    Nothing, but see details for a description of the plot that is produced.

    -
    -

    Details

    +
    +

    Details

    Two types of plots for visualizing differences between sets of two age estimates may be created. The reference ages are plotted on the x-axis and the nonreference ages are on the y-axis. The 1:1 (45 degree) agreement line is shown for comparative purposes. The default plot (using what="bias") was inspired by the age bias plot introduced by Campana et al. (1995). The default settings for this age bias plot show the mean and confidence interval for the nonreference ages at each of the reference ages. The level of confidence is controlled by sig.level= given in the original ageBias call (i.e., confidence level is 100*(1-sig.level)). Confidence intervals are only shown if the sample size is greater than the value in min.n.CI= (also from the original call to ageBias). Confidence intervals plotted in red with an open dot (by default; these can be changed with col.CIsig and pch.mean.sig, respectively) do not contain the reference age (see discussion of t-tests in ageBias). Sample sizes at each reference age are shown if show.n=TRUE. The position of the sample sizes is controlled with nYpos=, whereas their size is controlled with cex.n. Arguments may be used to nearly replicate the age bias plot as introduced by Campana et al. (1995) as shown in the examples.

    The frequency of observations at each unique (x,y) coordinate are shown by using what="numbers" in plotAB. The size of the numbers is controlled with cex.numbers.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    4-Age Comparisons. This is most of the original functionality that was in plot in the book. See examples.

    -
    -

    References

    +
    +

    References

    Campana, S.E., M.C. Annand, and J.I. McMillan. 1995. Graphical and statistical methods for determining the consistency of age determinations. Transactions of the American Fisheries Society 124:131-138. [Was (is?) available from http://www.bio.gc.ca/otoliths/documents/Campana%20et%20al%201995%20TAFS.pdf.]

    -
    -

    See also

    +
    +

    See also

    See ageBias and its plot method for what I consider a better age-bias plot; agePrecision for measures of precision between pairs of age estimates; and compare2 in fishmethods for similar functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    # Must create ageBias object first
    -ab1 <- ageBias(scaleC~otolithC,data=WhitefishLC,
    -               ref.lab="Otolith Age",nref.lab="Scale Age")
    -               
    -# Default plot
    -plotAB(ab1)
    +    
    +

    Examples

    +
    # Must create ageBias object first
    +ab1 <- ageBias(scaleC~otolithC,data=WhitefishLC,
    +               ref.lab="Otolith Age",nref.lab="Scale Age")
    +               
    +# Default plot
    +plotAB(ab1)
     
    -
    -# Very close to Campana et al. (2001)
    -plotAB(ab1,pch.mean.sig=19,col.CIsig="black",sfrac=0.01,
    -       ylim=c(-1,23),xlim=c(-1,23))
    +
    +# Very close to Campana et al. (2001)
    +plotAB(ab1,pch.mean.sig=19,col.CIsig="black",sfrac=0.01,
    +       ylim=c(-1,23),xlim=c(-1,23))
     
    -# Show sample sizes (different position and size than default)
    -plotAB(ab1,show.n=TRUE,nYpos=0.02,cex.n=0.5)
    +# Show sample sizes (different position and size than default)
    +plotAB(ab1,show.n=TRUE,nYpos=0.02,cex.n=0.5)
     
    -
    -# Traditional numbers plot
    -plotAB(ab1,what="numbers") 
    +
    +# Traditional numbers plot
    +plotAB(ab1,what="numbers") 
     
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/poiCI.html b/docs/reference/poiCI.html index 3ec3f06f..4a8cef49 100644 --- a/docs/reference/poiCI.html +++ b/docs/reference/poiCI.html @@ -1,161 +1,169 @@ -Confidence interval for Poisson counts. — poiCI • FSAConfidence interval for Poisson counts. — poiCI • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes a confidence interval for the Poisson counts.

    -
    -
    poiCI(
    -  x,
    -  conf.level = 0.95,
    -  type = c("exact", "daly", "byar", "asymptotic"),
    -  verbose = FALSE
    -)
    +
    +

    Usage

    +
    poiCI(
    +  x,
    +  conf.level = 0.95,
    +  type = c("exact", "daly", "byar", "asymptotic"),
    +  verbose = FALSE
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A single number or vector that represents the number of observed successes.

    + +
    conf.level

    A number that indicates the level of confidence to use for constructing confidence intervals (default is 0.95).

    + +
    type

    A string that identifies the type of method to use for the calculations. See details.

    + +
    verbose

    A logical that indicates whether x should be included in the returned matrix (=TRUE) or not (=FALSE; DEFAULT).

    +
    -
    -

    Value

    -

    A #x2 matrix that contains the lower and upper confidence interval bounds as columns and, if verbose=TRUEx.

    +
    +

    Value

    + + +

    A #x2 matrix that contains the lower and upper confidence interval bounds as columns and, if verbose=TRUE

    +

    +

    x.

    -
    -

    Details

    +
    +

    Details

    Computes a CI for the Poisson counts using the exact, gamma distribution (daly`), Byar's (byar), or normal approximation (asymptotic) methods.

    The pois.daly function gives essentially identical answers to the pois.exact function except when x=0. When x=0, for the upper confidence limit pois.exact returns 3.689 and pois.daly returns 2.996.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com, though this is largely based on pois.exact, pois.daly, pois.byar, and pois.approx from the old epitools package.

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com, though this is largely based on pois.exact, pois.daly, pois.byar, and pois.approx from the old epitools package.

    -
    -

    Examples

    -
    ## Demonstrates using all types at once
    -poiCI(12)
    +    
    +

    Examples

    +
    ## Demonstrates using all types at once
    +poiCI(12)
     #>             95% LCI  95% UCI
     #> Exact      6.200603 20.96156
     #> Daly       6.200575 20.96159
     #> Byar       6.552977 20.32447
     #> Asymptotic 5.210486 18.78951
    -
    -## Selecting types
    -poiCI(12,type="daly")
    +
    +## Selecting types
    +poiCI(12,type="daly")
     #>   95% LCI  95% UCI
     #>  6.200575 20.96159
    -poiCI(12,type="byar")
    +poiCI(12,type="byar")
     #>   95% LCI  95% UCI
     #>  6.552977 20.32447
    -poiCI(12,type="asymptotic")
    +poiCI(12,type="asymptotic")
     #>   95% LCI  95% UCI
     #>  5.210486 18.78951
    -poiCI(12,type="asymptotic",verbose=TRUE)
    +poiCI(12,type="asymptotic",verbose=TRUE)
     #>             x  95% LCI  95% UCI
     #> Asymptotic 12 5.210486 18.78951
    -poiCI(12,type=c("exact","daly"))
    +poiCI(12,type=c("exact","daly"))
     #>        95% LCI  95% UCI
     #> Exact 6.200603 20.96156
     #> Daly  6.200575 20.96159
    -poiCI(12,type=c("exact","daly"),verbose=TRUE)
    +poiCI(12,type=c("exact","daly"),verbose=TRUE)
     #>        x  95% LCI  95% UCI
     #> Exact 12 6.200603 20.96156
     #> Daly  12 6.200575 20.96159
    -
    -## Demonstrates use with multiple inputs
    -poiCI(c(7,10),type="exact")
    +
    +## Demonstrates use with multiple inputs
    +poiCI(c(7,10),type="exact")
     #>   95% LCI  95% UCI
     #>  2.814358 14.42268
     #>  4.795389 18.39036
    -poiCI(c(7,10),type="exact",verbose=TRUE)
    +poiCI(c(7,10),type="exact",verbose=TRUE)
     #>       x  95% LCI  95% UCI
     #> [1,]  7 2.814358 14.42268
     #> [2,] 10 4.795389 18.39036
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/psdAdd.html b/docs/reference/psdAdd.html index 7213877b..434edbde 100644 --- a/docs/reference/psdAdd.html +++ b/docs/reference/psdAdd.html @@ -1,168 +1,188 @@ -Creates a vector of Gabelhouse lengths for each species in an entire data frame. — psdAdd • FSACreates a vector of Gabelhouse lengths for each species in an entire data frame. — psdAdd • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Creates a vector of the Gabelhouse lengths specific to a species for all individuals in an entire data frame.

    -
    -
    psdAdd(len, ...)
    -
    -# S3 method for default
    -psdAdd(
    -  len,
    -  species,
    -  units = c("mm", "cm", "in"),
    -  use.names = TRUE,
    -  addSpec = NULL,
    -  addLens = NULL,
    -  verbose = TRUE,
    -  ...
    -)
    -
    -# S3 method for formula
    -psdAdd(
    -  len,
    -  data = NULL,
    -  units = c("mm", "cm", "in"),
    -  use.names = TRUE,
    -  addSpec = NULL,
    -  addLens = NULL,
    -  verbose = TRUE,
    -  ...
    -)
    +
    +

    Usage

    +
    psdAdd(len, ...)
    +
    +# S3 method for default
    +psdAdd(
    +  len,
    +  species,
    +  units = c("mm", "cm", "in"),
    +  use.names = TRUE,
    +  addSpec = NULL,
    +  addLens = NULL,
    +  verbose = TRUE,
    +  ...
    +)
    +
    +# S3 method for formula
    +psdAdd(
    +  len,
    +  data = NULL,
    +  units = c("mm", "cm", "in"),
    +  use.names = TRUE,
    +  addSpec = NULL,
    +  addLens = NULL,
    +  verbose = TRUE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    len

    A numeric vector that contains lengths measurements or a formula of the form len~spec where “len” generically represents the length variable and “spec” generically represents the species variable. Note that this formula can only contain two variables and must have the length variable on the left-hand-side and the species variable on the right-hand-side.

    + +
    ...

    Not used.

    + +
    species

    A character or factor vector that contains the species names. Ignored if len is a formula.

    + +
    units

    A string that indicates the type of units used for the lengths. Choices are mm for millimeters (DEFAULT), cm for centimeters, and in for inches.

    + +
    use.names

    A logical that indicates whether the vector returned is numeric (=FALSE) or string (=TRUE; default) representations of the Gabelhouse lengths. See details.

    + +
    addSpec

    A character vector of species names for which addLens will be provided.

    + +
    addLens

    A numeric vector of lengths that should be used in addition to the Gabelhouse lengths for the species in addSpec. See examples.

    + +
    verbose

    A logical that indicates whether detailed messages about species without Gabelhouse lengths or with no recorded values should be printed or not.

    + +
    data

    A data.frame that minimally contains the length measurements and species names if len is a formula.

    +
    -
    -

    Value

    -

    A numeric or factor vector that contains the Gabelhouse length categories.

    +
    +

    Value

    + + +

    A numeric or factor vector that contains the Gabelhouse length categories.

    -
    -

    Details

    +
    +

    Details

    This computes a vector that contains the Gabelhouse lengths specific to each species for all individuals in an entire data frame. The vector can be appended to an existing data.frame to create a variable that contains the Gabelhouse lengths for each individual. The Gabelhouse length value will be NA for each individual for which Gabelhouse length definitions do not exist in PSDlit. Species names in the data.frame must be the same as those used in PSDlit. See the examples for one method for changing species names to something that this function will recognize.

    Individuals shorter than “stock” length will be listed as substock if use.names=TRUE or 0 if use.names=FALSE.

    Additional lengths to be used for a species may be included by giving a vector of species names in addSpec and a corresponding vector of additional lengths in addLens. Note, however, that use.names will be reset to FALSE if addSpec and addLens are specified, as there is no way to order the names for all species when additional lengths are used.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.]

    Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson. 2006. Proportional size distribution (PSD): A further refinement of population size structure index terminology. Fisheries 32:348. [Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.]

    Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: development, use, and limitations. Reviews in Fisheries Science 1:203-222. [Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis%20et%20al.pdf.]

    -
    -

    See also

    +
    +

    See also

    psdVal, psdCalc, psdPlot, PSDlit, and wrAdd for related functions. See mapvalues for help in changing species names to match those in PSDlit.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Create random data for three species
    -# only for repeatability
    -set.seed(345234534)
    -dbg <- data.frame(species=factor(rep(c("Bluegill"),30)),
    -                  tl=round(rnorm(30,130,50),0))
    -dbg$wt <- round(4.23e-06*dbg$tl^3.316+rnorm(30,0,10),1)
    -dlb <- data.frame(species=factor(rep(c("Largemouth Bass"),30)),
    -                  tl=round(rnorm(30,350,60),0))
    -dlb$wt <- round(2.96e-06*dlb$tl^3.273+rnorm(30,0,60),1)
    -dbt <- data.frame(species=factor(rep(c("Bluefin Tuna"),30)),
    -                  tl=round(rnorm(30,1900,300),0))
    -dbt$wt <- round(4.5e-05*dbt$tl^2.8+rnorm(30,0,6000),1)
    -df <- rbind(dbg,dlb,dbt)
    -str(df)
    +    
    +

    Examples

    +
    ## Create random data for three species
    +# only for repeatability
    +set.seed(345234534)
    +dbg <- data.frame(species=factor(rep(c("Bluegill"),30)),
    +                  tl=round(rnorm(30,130,50),0))
    +dbg$wt <- round(4.23e-06*dbg$tl^3.316+rnorm(30,0,10),1)
    +dlb <- data.frame(species=factor(rep(c("Largemouth Bass"),30)),
    +                  tl=round(rnorm(30,350,60),0))
    +dlb$wt <- round(2.96e-06*dlb$tl^3.273+rnorm(30,0,60),1)
    +dbt <- data.frame(species=factor(rep(c("Bluefin Tuna"),30)),
    +                  tl=round(rnorm(30,1900,300),0))
    +dbt$wt <- round(4.5e-05*dbt$tl^2.8+rnorm(30,0,6000),1)
    +df <- rbind(dbg,dlb,dbt)
    +str(df)
     #> 'data.frame':	90 obs. of  3 variables:
     #>  $ species: Factor w/ 3 levels "Bluegill","Largemouth Bass",..: 1 1 1 1 1 1 1 1 1 1 ...
     #>  $ tl     : num  42 73 152 184 167 94 138 169 86 157 ...
     #>  $ wt     : num  -12.5 4.8 86.6 134.4 91.9 ...
    -
    -## Examples (non-dplyr)
    -# Add variable using category names -- formula notation
    -df$PSD <- psdAdd(tl~species,data=df)
    +
    +## Examples (non-dplyr)
    +# Add variable using category names -- formula notation
    +df$PSD <- psdAdd(tl~species,data=df)
     #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna
    -head(df)
    +head(df)
     #>    species  tl    wt      PSD
     #> 1 Bluegill  42 -12.5 substock
     #> 2 Bluegill  73   4.8 substock
    @@ -170,10 +190,10 @@ 

    Examples

    #> 4 Bluegill 184 134.4 quality #> 5 Bluegill 167 91.9 quality #> 6 Bluegill 94 38.3 stock -# Add variable using category names -- non-formula notation -df$PSD1 <- psdAdd(df$tl,df$species) +# Add variable using category names -- non-formula notation +df$PSD1 <- psdAdd(df$tl,df$species) #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna -head(df) +head(df) #> species tl wt PSD PSD1 #> 1 Bluegill 42 -12.5 substock substock #> 2 Bluegill 73 4.8 substock substock @@ -181,10 +201,10 @@

    Examples

    #> 4 Bluegill 184 134.4 quality quality #> 5 Bluegill 167 91.9 quality quality #> 6 Bluegill 94 38.3 stock stock -# Add variable using length values as names -df$PSD2 <- psdAdd(tl~species,data=df,use.names=FALSE) +# Add variable using length values as names +df$PSD2 <- psdAdd(tl~species,data=df,use.names=FALSE) #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna -head(df) +head(df) #> species tl wt PSD PSD1 PSD2 #> 1 Bluegill 42 -12.5 substock substock 0 #> 2 Bluegill 73 4.8 substock substock 0 @@ -192,10 +212,10 @@

    Examples

    #> 4 Bluegill 184 134.4 quality quality 150 #> 5 Bluegill 167 91.9 quality quality 150 #> 6 Bluegill 94 38.3 stock stock 80 -# Add additional length and name for Bluegill -df$PSD3 <- psdAdd(tl~species,data=df,addSpec="Bluegill",addLens=175) +# Add additional length and name for Bluegill +df$PSD3 <- psdAdd(tl~species,data=df,addSpec="Bluegill",addLens=175) #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna -head(df) +head(df) #> species tl wt PSD PSD1 PSD2 PSD3 #> 1 Bluegill 42 -12.5 substock substock 0 0 #> 2 Bluegill 73 4.8 substock substock 0 0 @@ -203,12 +223,12 @@

    Examples

    #> 4 Bluegill 184 134.4 quality quality 150 175 #> 5 Bluegill 167 91.9 quality quality 150 150 #> 6 Bluegill 94 38.3 stock stock 80 80 -# Add add'l lengths and names for Bluegill and Largemouth Bass from a data.frame -addls <- data.frame(species=c("Bluegill","Largemouth Bass","Largemouth Bass"), - lens=c(175,254,356)) -df$psd4 <- psdAdd(tl~species,data=df,addSpec=addls$species,addLens=addls$lens) +# Add add'l lengths and names for Bluegill and Largemouth Bass from a data.frame +addls <- data.frame(species=c("Bluegill","Largemouth Bass","Largemouth Bass"), + lens=c(175,254,356)) +df$psd4 <- psdAdd(tl~species,data=df,addSpec=addls$species,addLens=addls$lens) #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna -head(df) +head(df) #> species tl wt PSD PSD1 PSD2 PSD3 psd4 #> 1 Bluegill 42 -12.5 substock substock 0 0 0 #> 2 Bluegill 73 4.8 substock substock 0 0 0 @@ -216,20 +236,20 @@

    Examples

    #> 4 Bluegill 184 134.4 quality quality 150 175 175 #> 5 Bluegill 167 91.9 quality quality 150 150 150 #> 6 Bluegill 94 38.3 stock stock 80 80 80 - -## All of the above but using dplyr -if (require(dplyr)) { - df <- df %>% - mutate(PSD1A=psdAdd(tl,species)) %>% - mutate(PSD2A=psdAdd(tl,species,use.names=FALSE)) %>% - mutate(psd3a=psdAdd(tl,species,addSpec="Bluegill",addLens=175)) %>% - mutate(psd4a=psdAdd(tl,species,addSpec=addls$species,addLens=addls$lens)) -} + +## All of the above but using dplyr +if (require(dplyr)) { + df <- df %>% + mutate(PSD1A=psdAdd(tl,species)) %>% + mutate(PSD2A=psdAdd(tl,species,use.names=FALSE)) %>% + mutate(psd3a=psdAdd(tl,species,addSpec="Bluegill",addLens=175)) %>% + mutate(psd4a=psdAdd(tl,species,addSpec=addls$species,addLens=addls$lens)) +} #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna #> No known Gabelhouse (PSD) lengths for: Bluefin Tuna -df +df #> species tl wt PSD PSD1 PSD2 PSD3 psd4 PSD1A #> 1 Bluegill 42 -12.5 substock substock 0 0 0 substock #> 2 Bluegill 73 4.8 substock substock 0 0 0 substock @@ -412,29 +432,25 @@

    Examples

    #> 88 NA NA NA #> 89 NA NA NA #> 90 NA NA NA - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/psdCI.html b/docs/reference/psdCI.html index fa6c076f..d0dacac7 100644 --- a/docs/reference/psdCI.html +++ b/docs/reference/psdCI.html @@ -1,159 +1,177 @@ -Compute confidence intervals for PSD-X and PSD X-Y values. — psdCI • FSACompute confidence intervals for PSD-X and PSD X-Y values. — psdCI • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Compute confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values as requested by the user.

    -
    -
    psdCI(
    -  indvec,
    -  ptbl,
    -  n,
    -  method = c("binomial", "multinomial"),
    -  bin.type = c("wilson", "exact", "asymptotic"),
    -  conf.level = 0.95,
    -  label = NULL,
    -  digits = 1
    -)
    +
    +

    Usage

    +
    psdCI(
    +  indvec,
    +  ptbl,
    +  n,
    +  method = c("binomial", "multinomial"),
    +  bin.type = c("wilson", "exact", "asymptotic"),
    +  conf.level = 0.95,
    +  label = NULL,
    +  digits = 1
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    indvec

    A numeric vector of 0s and 1s that identify the linear combination of proportions from ptbl that the user is interested in. See details.

    + +
    ptbl

    A numeric vector or array that contains the proportion or percentage of all individuals in each length category. See details.

    + +
    n

    A single numeric of the number of fish used to construct ptbl.

    + +
    method

    A string that identifies the confidence interval method to use. See details.

    + +
    bin.type

    A string that identifies the type of method to use for calculation of the confidence intervals when Rmethod="binomial". See details of binCI.

    + +
    conf.level

    A number that indicates the level of confidence to use for constructing confidence intervals (default is 0.95).

    + +
    label

    A single string that can be used to label the row of the output matrix.

    + +
    digits

    A numeric that indicates the number of decimals to round the result to.

    +
    -
    -

    Value

    -

    A matrix with columns that contain the computed PSD-X or PSD X-Y value and the associated confidence interval. The confidence interval values were set to zero or 100 if the computed value was negative or greater than 100, respectively.

    +
    +

    Value

    + + +

    A matrix with columns that contain the computed PSD-X or PSD X-Y value and the associated confidence interval. The confidence interval values were set to zero or 100 if the computed value was negative or greater than 100, respectively.

    -
    -

    Details

    +
    +

    Details

    Computes confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values. Two methods can be used as chosen with method=. If method="binomial" then the binomial distribution (via binCI()) is used. If method="multinomial" then the multinomial method described by Brenden et al. (2008) is used. This function is defined to compute one confidence interval so method="binomial" is the default. See examples and psdCalc for computing several simultaneous confidence intervals.

    A table of proportions within each length category is given in ptbl. If ptbl has any values greater than 1 then it is assumed that a table of percentages was supplied and the entire table will be divided by 100 to continue. The proportions must sum to 1 (with some allowance for rounding).

    A vector of length equal to the length of ptbl is given in indvec which contains zeros and ones to identify the linear combination of values in ptbl to use to construct the confidence intervals. For example, if ptbl has four proportions then indvec=c(1,0,0,0) would be used to construct a confidence interval for the population proportion in the first category. Alternatively, indvec=c(0,0,1,1) would be used to construct a confidence interval for the population proportion in the last two categories. This vector must not contain all zeros or all ones.

    -
    -

    Testing

    +
    +

    Testing

    The multinomial results match the results given in Brendent et al. (2008).

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Brenden, T.O., T. Wagner, and B.R. Murphy. 2008. Novel tools for analyzing proportional size distribution index data. North American Journal of Fisheries Management 28:1233-1242. [Was (is?) from http://qfc.fw.msu.edu/Publications/Publication%20List/2008/Novel%20Tools%20for%20Analyzing%20Proportional%20Size%20Distribution_Brenden.pdf.]

    -
    -

    See also

    +
    +

    See also

    See psdVal, psdPlot, psdAdd, PSDlit, tictactoe, lencat, and rcumsum for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## similar to Brenden et al. (2008)
    -n <- 997
    -ipsd <- c(130,491,253,123)/n
    -
    -## single binomial
    -psdCI(c(0,0,1,1),ipsd,n=n)
    +    
    +

    Examples

    +
    ## similar to Brenden et al. (2008)
    +n <- 997
    +ipsd <- c(130,491,253,123)/n
    +
    +## single binomial
    +psdCI(c(0,0,1,1),ipsd,n=n)
     #>      Estimate 95% LCI 95% UCI
     #> [1,]     37.7    34.8    40.8
    -psdCI(c(1,0,0,0),ipsd,n=n,label="PSD S-Q")
    +psdCI(c(1,0,0,0),ipsd,n=n,label="PSD S-Q")
     #>         Estimate 95% LCI 95% UCI
     #> PSD S-Q       13    11.1    15.3
    -
    -## single multinomial
    -psdCI(c(0,0,1,1),ipsd,n=n,method="multinomial")
    +
    +## single multinomial
    +psdCI(c(0,0,1,1),ipsd,n=n,method="multinomial")
     #>      Estimate 95% LCI 95% UCI
     #> [1,]     37.7    33.4      42
    -psdCI(c(1,0,0,0),ipsd,n=n,method="multinomial",label="PSD S-Q")
    +psdCI(c(1,0,0,0),ipsd,n=n,method="multinomial",label="PSD S-Q")
     #>         Estimate 95% LCI 95% UCI
     #> PSD S-Q       13    10.1      16
    -
    -## multiple multinomials (but see psdCalc())
    -lbls <- c("PSD S-Q","PSD Q-P","PSD P-M","PSD M-T","PSD","PSD-P")
    -imat <- matrix(c(1,0,0,0,
    -                 0,1,0,0,
    -                 0,0,1,0,
    -                 0,0,0,1,
    -                 0,1,1,1,
    -                 0,0,1,1),nrow=6,byrow=TRUE)
    -rownames(imat) <- lbls
    -imat
    +
    +## multiple multinomials (but see psdCalc())
    +lbls <- c("PSD S-Q","PSD Q-P","PSD P-M","PSD M-T","PSD","PSD-P")
    +imat <- matrix(c(1,0,0,0,
    +                 0,1,0,0,
    +                 0,0,1,0,
    +                 0,0,0,1,
    +                 0,1,1,1,
    +                 0,0,1,1),nrow=6,byrow=TRUE)
    +rownames(imat) <- lbls
    +imat
     #>         [,1] [,2] [,3] [,4]
     #> PSD S-Q    1    0    0    0
     #> PSD Q-P    0    1    0    0
    @@ -161,10 +179,10 @@ 

    Examples

    #> PSD M-T 0 0 0 1 #> PSD 0 1 1 1 #> PSD-P 0 0 1 1 - -mcis <- t(apply(imat,MARGIN=1,FUN=psdCI,ptbl=ipsd,n=n,method="multinomial")) -colnames(mcis) <- c("Estimate","95% LCI","95% UCI") -mcis + +mcis <- t(apply(imat,MARGIN=1,FUN=psdCI,ptbl=ipsd,n=n,method="multinomial")) +colnames(mcis) <- c("Estimate","95% LCI","95% UCI") +mcis #> Estimate 95% LCI 95% UCI #> PSD S-Q 13.0 10.1 16.0 #> PSD Q-P 49.2 44.8 53.7 @@ -172,11 +190,11 @@

    Examples

    #> PSD M-T 12.3 9.4 15.2 #> PSD 87.0 84.0 89.9 #> PSD-P 37.7 33.4 42.0 - -## Multiple "Bonferroni-corrected" (for six comparisons) binomial method -bcis <- t(apply(imat,MARGIN=1,FUN=psdCI,ptbl=ipsd,n=n,conf.level=1-0.05/6)) -colnames(bcis) <- c("Estimate","95% LCI","95% UCI") -bcis + +## Multiple "Bonferroni-corrected" (for six comparisons) binomial method +bcis <- t(apply(imat,MARGIN=1,FUN=psdCI,ptbl=ipsd,n=n,conf.level=1-0.05/6)) +colnames(bcis) <- c("Estimate","95% LCI","95% UCI") +bcis #> Estimate 95% LCI 95% UCI #> PSD S-Q 13.0 10.5 16.1 #> PSD Q-P 49.2 45.1 53.4 @@ -184,29 +202,25 @@

    Examples

    #> PSD M-T 12.3 9.8 15.3 #> PSD 87.0 83.9 89.5 #> PSD-P 37.7 33.8 41.8 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/psdCalc.html b/docs/reference/psdCalc.html index 7646fe0c..1f524895 100644 --- a/docs/reference/psdCalc.html +++ b/docs/reference/psdCalc.html @@ -1,153 +1,181 @@ -Convenience function for calculating PSD-X and PSD X-Y values. — psdCalc • FSAConvenience function for calculating PSD-X and PSD X-Y values. — psdCalc • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Convenience function for calculating (traditional) PSD-X and (incremental) PSD X-Y values for all Gabelhouse lengths and increments thereof.

    -
    -
    psdCalc(
    -  formula,
    -  data,
    -  species,
    -  units = c("mm", "cm", "in"),
    -  method = c("multinomial", "binomial"),
    -  conf.level = 0.95,
    -  addLens = NULL,
    -  addNames = NULL,
    -  justAdds = FALSE,
    -  what = c("all", "traditional", "incremental", "none"),
    -  drop0Est = TRUE,
    -  showIntermediate = FALSE,
    -  digits = 0
    -)
    +
    +

    Usage

    +
    psdCalc(
    +  formula,
    +  data,
    +  species,
    +  units = c("mm", "cm", "in"),
    +  method = c("multinomial", "binomial"),
    +  conf.level = 0.95,
    +  addLens = NULL,
    +  addNames = NULL,
    +  justAdds = FALSE,
    +  what = c("all", "traditional", "incremental", "none"),
    +  drop0Est = TRUE,
    +  showIntermediate = FALSE,
    +  digits = 0
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula of the form ~length where “length” generically represents a variable in data that contains the observed lengthsNote that this formula may only contain one variable and it must be numeric.

    + +
    data

    A data.frame that minimally contains the observed lengths given in the variable in formula.

    + +
    species

    A string that contains the species name for which Gabelhouse lengths existSee psdVal for details.

    + +
    units

    A string that indicates the type of units used for the lengthsChoices are mm for millimeters (DEFAULT), cm for centimeters, and in for inches.

    + +
    method

    A character that identifies the confidence interval method to useSee details in psdCI.

    + +
    conf.level

    A number that indicates the level of confidence to use for constructing confidence intervals (default is 0.95).

    + +
    addLens

    A numeric vector that contains minimum lengths for additional categoriesSee psdVal for details.

    + +
    addNames

    A string vector that contains names for the additional lengths added with addLensSee psdVal for details.

    + +
    justAdds

    A logical that indicates whether just the values related to the length sin addLens should be returned.

    + +
    what

    A string that indicates the type of PSD values that will be printedSee details.

    + +
    drop0Est

    A logical that indicates whether the PSD values that are zero should be dropped from the output.

    + +
    showIntermediate

    A logical that indicates whether the number of fish in the category and the number of stock fish (i.e., “intermediate” values) should be included in the returned matrixDefault is to not include these values.

    + +
    digits

    A numeric that indicates the number of decimals to round the result toDefault is zero digits following the recommendation of Neumann and Allen (2007).

    +
    -
    -

    Value

    -

    A matrix with columns that contain the computed PSD-X or PSD X-Y values and associated confidence intervalsIf showIntermediate=TRUE then the number of fish in the category and the number of stock fish will also be shown.

    +
    +

    Value

    + + +

    A matrix with columns that contain the computed PSD-X or PSD X-Y values and associated confidence intervalsIf showIntermediate=TRUE then the number of fish in the category and the number of stock fish will also be shown.

    -
    -

    Details

    +
    +

    Details

    Computes the (traditional) PSD-X and (incremental) PSD X-Y values, with associated confidence intervals, for each Gabelhouse lengthAll PSD-X and PSD X-Y values are printed if what="all" (DEFAULT), only PSD-X values are printed if what="traditional", only PSD X-Y values are printed if what="incremental", and nothing is printed (but the matrix is still returned) if what="none".

    Confidence intervals can be computed with either the multinomial (Default) or binomial distribution as set in methodSee details in psdCI for more information.

    -
    -

    Testing

    +
    +

    Testing

    Point estimate calculations match those constructed "by hand."

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with RChapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with RChapman & Hall/CRC, Boca Raton, FL.

    Guy, C.S., R.M. Neumann, and D.W. Willis2006New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS)Fisheries 31:86-87 [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.]

    Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson2006Proportional size distribution (PSD): A further refinement of population size structure index terminologyFisheries 32:348[Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.]

    Neumann, R. M. and Allen, M. S2007Size structure. In Guy, C. S. and Brown, M. L., editors, Analysis and Interpretation of Freshwater Fisheries Data, Chapter 9, pages 375-421. American Fisheries Society, Bethesda, MD.

    Willis, D.W., B.R. Murphy, and C.S. Guy1993Stock density indices: development, use, and limitationsReviews in Fisheries Science 1:203-222[Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis%20et%20al.pdf.]

    -
    -

    See also

    +
    +

    See also

    See psdVal, psdPlot, psdAdd, PSDlit, tictactoe, lencat, and rcumsum for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Random length data
    -# suppose this is yellow perch to the nearest mm
    -yepdf <- data.frame(yepmm=round(c(rnorm(100,mean=125,sd=15),
    -                      rnorm(50,mean=200,sd=25),
    -                      rnorm(20,mean=300,sd=40)),0),
    -                    species=rep("Yellow Perch",170))
    -psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1)
    +    
    +

    Examples

    +
    ## Random length data
    +# suppose this is yellow perch to the nearest mm
    +yepdf <- data.frame(yepmm=round(c(rnorm(100,mean=125,sd=15),
    +                      rnorm(50,mean=200,sd=25),
    +                      rnorm(20,mean=300,sd=40)),0),
    +                    species=rep("Yellow Perch",170))
    +psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1)
     #> Warning: Some category sample size <20, some CI coverage may be
     #>  lower than 95%.
     #>         Estimate 95% LCI 95% UCI
    @@ -159,7 +187,7 @@ 

    Examples

    #> PSD Q-P 18.1 6.5 29.7 #> PSD P-M 11.4 1.9 21.0 #> PSD M-T 6.7 0.0 14.2 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1,drop0Est=TRUE) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1,drop0Est=TRUE) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -171,9 +199,9 @@

    Examples

    #> PSD Q-P 18.1 6.5 29.7 #> PSD P-M 11.4 1.9 21.0 #> PSD M-T 6.7 0.0 14.2 - -## add a length -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150) + +## add a length +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -187,9 +215,9 @@

    Examples

    #> PSD Q-P 18 6 31 #> PSD P-M 11 1 22 #> PSD M-T 7 0 15 - -## add lengths with names -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,addNames="minLen") + +## add lengths with names +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,addNames="minLen") #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -203,7 +231,7 @@

    Examples

    #> PSD Q-P 18 6 31 #> PSD P-M 11 1 22 #> PSD M-T 7 0 15 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150)) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150)) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -217,7 +245,7 @@

    Examples

    #> PSD Q-P 18 6 31 #> PSD P-M 11 1 22 #> PSD M-T 7 0 15 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c(150,275),addNames=c("minSlot","maxSlot")) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c(150,275),addNames=c("minSlot","maxSlot")) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -233,7 +261,7 @@

    Examples

    #> PSD P-maxSlot 4 0 10 #> PSD maxSlot-M 8 0 17 #> PSD M-T 7 0 15 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150,"maxslot"=275)) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150,"maxslot"=275)) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -249,23 +277,23 @@

    Examples

    #> PSD P-maxslot 4 0 10 #> PSD maxslot-M 8 0 17 #> PSD M-T 7 0 15 - -## add lengths with names, return just those values that use those lengths -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150),justAdds=TRUE) + +## add lengths with names, return just those values that use those lengths +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150),justAdds=TRUE) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI #> PSD-minLen 70 56 85 #> PSD S-minLen 30 15 44 #> PSD minLen-Q 33 18 49 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150),justAdds=TRUE, - what="traditional") +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c("minLen"=150),justAdds=TRUE, + what="traditional") #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI #> 70 56 85 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c(150,275), - addNames=c("minSlot","maxSlot"),justAdds=TRUE) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c(150,275), + addNames=c("minSlot","maxSlot"),justAdds=TRUE) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -275,16 +303,16 @@

    Examples

    #> PSD minSlot-Q 33 17 50 #> PSD P-maxSlot 4 0 10 #> PSD maxSlot-M 8 0 17 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c(150,275), - addNames=c("minSlot","maxSlot"),justAdds=TRUE,what="traditional") +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=c(150,275), + addNames=c("minSlot","maxSlot"),justAdds=TRUE,what="traditional") #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI #> PSD-minSlot 70 55 86 #> PSD-maxSlot 15 3 28 - -## different output types -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,what="traditional") + +## different output types +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,what="traditional") #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -293,7 +321,7 @@

    Examples

    #> PSD-P 19 6 32 #> PSD-M 8 0 16 #> PSD-T 1 0 4 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,what="incremental") +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,what="incremental") #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -302,12 +330,12 @@

    Examples

    #> PSD Q-P 18 6 31 #> PSD P-M 11 1 22 #> PSD M-T 7 0 15 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,what="none") +psdCalc(~yepmm,data=yepdf,species="Yellow perch",addLens=150,what="none") #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. - -## Show intermediate values -psdCalc(~yepmm,data=yepdf,species="Yellow perch",showInterm=TRUE) + +## Show intermediate values +psdCalc(~yepmm,data=yepdf,species="Yellow perch",showInterm=TRUE) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> num stock Estimate 95% LCI 95% UCI @@ -319,7 +347,7 @@

    Examples

    #> PSD Q-P 19 105 18 7 30 #> PSD P-M 12 105 11 2 21 #> PSD M-T 7 105 7 0 14 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",what="traditional",showInterm=TRUE) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",what="traditional",showInterm=TRUE) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> num stock Estimate 95% LCI 95% UCI @@ -327,7 +355,7 @@

    Examples

    #> PSD-P 20 105 19 7 31 #> PSD-M 8 105 8 0 16 #> PSD-T 1 105 1 0 4 -psdCalc(~yepmm,data=yepdf,species="Yellow perch",what="incremental",showInterm=TRUE) +psdCalc(~yepmm,data=yepdf,species="Yellow perch",what="incremental",showInterm=TRUE) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> num stock Estimate 95% LCI 95% UCI @@ -335,9 +363,9 @@

    Examples

    #> PSD Q-P 19 105 18 7 30 #> PSD P-M 12 105 11 2 21 #> PSD M-T 7 105 7 0 14 - -## Control the digits -psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) + +## Control the digits +psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) #> Warning: Some category sample size <20, some CI coverage may be #> lower than 95%. #> Estimate 95% LCI 95% UCI @@ -349,29 +377,25 @@

    Examples

    #> PSD Q-P 18.1 6.5 29.7 #> PSD P-M 11.4 1.9 21.0 #> PSD M-T 6.7 0.0 14.2 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/psdPlot.html b/docs/reference/psdPlot.html index 52c69add..4bc445d5 100644 --- a/docs/reference/psdPlot.html +++ b/docs/reference/psdPlot.html @@ -1,209 +1,251 @@ -Length-frequency histogram with Gabelhouse lengths highlighted. — psdPlot • FSALength-frequency histogram with Gabelhouse lengths highlighted. — psdPlot • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Constructs a length-frequency histogram with Gabelhouse lengths highlighted.

    -
    -
    psdPlot(
    -  formula,
    -  data,
    -  species = "List",
    -  units = c("mm", "cm", "in"),
    -  startcat = 0,
    -  w = 1,
    -  justPSDQ = FALSE,
    -  main = "",
    -  xlab = "Length",
    -  ylab = "Number",
    -  xlim = NULL,
    -  ylim = c(0, max(h$counts) * 1.05),
    -  substock.col = "white",
    -  stock.col = "gray90",
    -  psd.col = "black",
    -  psd.lty = 2,
    -  psd.lwd = 1,
    -  show.abbrevs = TRUE,
    -  psd.add = TRUE,
    -  psd.pos = "topleft",
    -  psd.cex = 0.75,
    -  ...
    -)
    +
    +

    Usage

    +
    psdPlot(
    +  formula,
    +  data,
    +  species = "List",
    +  units = c("mm", "cm", "in"),
    +  startcat = 0,
    +  w = 1,
    +  justPSDQ = FALSE,
    +  main = "",
    +  xlab = "Length",
    +  ylab = "Number",
    +  xlim = NULL,
    +  ylim = c(0, max(h$counts) * 1.05),
    +  substock.col = "white",
    +  stock.col = "gray90",
    +  psd.col = "black",
    +  psd.lty = 2,
    +  psd.lwd = 1,
    +  show.abbrevs = TRUE,
    +  psd.add = TRUE,
    +  psd.pos = "topleft",
    +  psd.cex = 0.75,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula of the form ~length where “length” generically represents a variable in data that contains length measurements. Note that this formula can only contain one variable.

    + +
    data

    A data.frame that minimally contains the length measurements given in the variable in the formula.

    + +
    species

    A string that contains the species name for which Gabelhouse length categories exist. See psdVal for details.

    + +
    units

    A string that indicates the type of units used for the length measurements. Choices are mm for millimeters (DEFAULT), cm for centimeters, and in for inches.

    + +
    startcat

    A number that indicates the beginning of the first length-class.

    + +
    w

    A number that indicates the width of length classes to create.

    + +
    justPSDQ

    A logical that indicates whether just stock and quality (for PSD-Q calculations) categories should be used. If FALSE (default) then the five Gabelhouse categories will be used.

    + +
    main

    A string that serves as the main label for the histogram.

    + +
    xlab

    A string that serves as the label for the x-axis.

    + +
    ylab

    A string that serves as the label for the y-axis.

    + +
    xlim

    A numeric vector of length two that indicates the minimum and maximum values (i.e., fish lengths) for the x-axis.

    + +
    ylim

    A numeric vector of length two that indicates the minimum and maximum values for the y-axis.

    + +
    substock.col

    A string that indicates the color to use for the bars representing under-stock size fish.

    + +
    stock.col

    A string that indicates the color to use for the bars representing stock size fish.

    + +
    psd.col

    A string that indicates the color to use for the vertical lines at the Gabelhouse length category values.

    + +
    psd.lty

    A numeric that indicates the line type to use for the vertical lines at the Gabelhouse length category values.

    + +
    psd.lwd

    A numeric that indicates the line width to use for the vertical lines at the Gabelhouse length category values.

    + +
    show.abbrevs

    A logical that indicates if the abbreviations for the Gabelhouse length categories should be added to the top of the plot.

    + +
    psd.add

    A logical that indicates if the calculated PSD values should be added to the plot (default is TRUE).

    + +
    psd.pos

    A string that indicates the position for where the PSD values will be shown. See details in legend.

    + +
    psd.cex

    A numeric value that indicates the character expansion for the PSD values text.

    + +
    ...

    Arguments to be passed to the low-level plotting functions.

    +
    -
    -

    Value

    -

    None. However, a graphic is produced.

    +
    +

    Value

    + + +

    None. However, a graphic is produced.

    -
    -

    Details

    +
    +

    Details

    Constructs a length-frequency histogram with the stock-sized fish highlighted, the Gabelhouse lengths marked by vertical lines, and the (traditional) PSD-X values superimposed.

    The length of fish plotted on the x-axis can be controlled with xlim, however, the minimum value in xlim must be less than the stock length for that species.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.]

    Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson. 2006. Proportional size distribution (PSD): A further refinement of population size structure index terminology. Fisheries 32:348. [Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.]

    Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: development, use, and limitations. Reviews in Fisheries Science 1:203-222. [Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis%20et%20al.pdf.]

    -
    -

    See also

    +
    +

    See also

    See psdVal, psdCalc, psdAdd, PSDlit, lencat, tictactoe, lencat, and rcumsum for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Random length data
    -# suppose this is yellow perch to the nearest mm
    -df <- data.frame(spec=rep("Yellow Perch",170),
    -                 mm=c(rnorm(100,mean=125,sd=15),rnorm(50,mean=200,sd=25),
    -                      rnorm(20,mean=300,sd=40)))
    -
    -## Example graphics
    -op <- par(mar=c(3,3,2,1),mgp=c(1.7,0.5,0))
    -# Using 10-mm increments
    -psdPlot(~mm,data=df,species="Yellow perch",w=10)
    +    
    +

    Examples

    +
    ## Random length data
    +# suppose this is yellow perch to the nearest mm
    +df <- data.frame(spec=rep("Yellow Perch",170),
    +                 mm=c(rnorm(100,mean=125,sd=15),rnorm(50,mean=200,sd=25),
    +                      rnorm(20,mean=300,sd=40)))
    +
    +## Example graphics
    +op <- par(mar=c(3,3,2,1),mgp=c(1.7,0.5,0))
    +# Using 10-mm increments
    +psdPlot(~mm,data=df,species="Yellow perch",w=10)
     
    -psdPlot(~mm,data=df,species="Yellow perch",w=10,substock.col="gray90",
    -        stock.col="gray30")
    +psdPlot(~mm,data=df,species="Yellow perch",w=10,substock.col="gray90",
    +        stock.col="gray30")
     
    -# ... but without the PSD values
    -psdPlot(~mm,data=df,species="Yellow perch",w=10,psd.add=FALSE)
    +# ... but without the PSD values
    +psdPlot(~mm,data=df,species="Yellow perch",w=10,psd.add=FALSE)
     
    -par(op)
    -
    +par(op)
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/psdVal.html b/docs/reference/psdVal.html index 48848de3..dc2f1345 100644 --- a/docs/reference/psdVal.html +++ b/docs/reference/psdVal.html @@ -1,123 +1,137 @@ -Finds Gabelhouse lengths (for PSD calculations) for a species. — psdVal • FSAFinds Gabelhouse lengths (for PSD calculations) for a species. — psdVal • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Returns a vector with the five Gabelhouse lengths for a chosen species.

    -
    -
    psdVal(
    -  species = "List",
    -  units = c("mm", "cm", "in"),
    -  incl.zero = TRUE,
    -  addLens = NULL,
    -  addNames = NULL,
    -  showJustSource = FALSE
    -)
    +
    +

    Usage

    +
    psdVal(
    +  species = "List",
    +  units = c("mm", "cm", "in"),
    +  incl.zero = TRUE,
    +  addLens = NULL,
    +  addNames = NULL,
    +  showJustSource = FALSE
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    species

    A string that contains the species name for which to find Gabelhouse lengths. See details.

    + +
    units

    A string that indicates the units for the returned lengths. Choices are mm for millimeters (DEFAULT), cm for centimeters, and in for inches.

    + +
    incl.zero

    A logical that indicates if a zero is included in the first position of the returned vector (DEFAULT) or not. This position will be named “substock”. See details.

    + +
    addLens

    A numeric vector that contains minimum length definitions for additional categories. See details.

    + +
    addNames

    A string vector that contains names for the additional length categories added with addLens. See details.

    + +
    showJustSource

    A logical that indicates whether just the literature source information should be returned (TRUE) or not. If TRUE this will NOT return any of the Gabelhouse length information.

    +
    -
    -

    Value

    -

    A vector of minimum values for length categories for the chosen species.

    +
    +

    Value

    + + +

    A vector of minimum values for length categories for the chosen species.

    -
    -

    Details

    +
    +

    Details

    Finds the Gabelhouse lengths from data(PSDlit) for the species given in species. The species name must be spelled exactly (within capitalization differences) as it appears in data(PSDlit). Type psdVal() to see the list of species and how they are spelled.

    A zero is included in the first position of the returned vector if incl.zero=TRUE. This is useful when computing PSD values with a data.frame that contains fish smaller than the stock length.

    Additional lengths may be added to the returned vector with addLens. Names for these lengths can be included in addNames. If addNames is non-NULL, then it must be of the same length as addLens. If addLens is non-NULL but addNames is NULL, then the default names will be the same as the lengths in addLens. The addLens argument is useful for calculating PSD values that are different from the Gabelhouse lengths.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.]

    Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson. 2006. Proportional size distribution (PSD): A further refinement of population size structure index terminology. Fisheries 32:348. [Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.]

    Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: development, use, and limitations. Reviews in Fisheries Science 1:203-222. [Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis%20et%20al.pdf.]

    -
    -

    See also

    +
    +

    See also

    See psdCalc, psdPlot, psdAdd, PSDlit, tictactoe, lencat, and rcumsum for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    # List all of the species
    -psdVal()
    +    
    +

    Examples

    +
    # List all of the species
    +psdVal()
     #> 
     #> Species name must be one of following. Be careful of spelling and capitalization.
     #>  [1] "Arctic Grayling"             "Bighead Carp"               
    @@ -154,71 +168,67 @@ 

    Examples

    #> [63] "White Crappie" "White Perch" #> [65] "White Sucker" "Yellow Bass" #> [67] "Yellow Bullhead" "Yellow Perch" -# Demonstrate typical usages -psdVal("Yellow perch") +# Demonstrate typical usages +psdVal("Yellow perch") #> substock stock quality preferred memorable trophy #> 0 130 200 250 300 380 -psdVal("Walleye",units="cm") +psdVal("Walleye",units="cm") #> substock stock quality preferred memorable trophy #> 0 25 38 51 63 76 -psdVal("Bluegill",units="in") +psdVal("Bluegill",units="in") #> substock stock quality preferred memorable trophy #> 0 3 6 8 10 12 -psdVal("Bluegill",units="in",incl.zero=FALSE) +psdVal("Bluegill",units="in",incl.zero=FALSE) #> stock quality preferred memorable trophy #> 3 6 8 10 12 -psdVal("Bluegill") +psdVal("Bluegill") #> substock stock quality preferred memorable trophy #> 0 80 150 200 250 300 -# Demonstrate that it will work with mis-capitalization -psdVal("bluegill") +# Demonstrate that it will work with mis-capitalization +psdVal("bluegill") #> substock stock quality preferred memorable trophy #> 0 80 150 200 250 300 -psdVal("Yellow Perch") +psdVal("Yellow Perch") #> substock stock quality preferred memorable trophy #> 0 130 200 250 300 380 -# Demonstrate adding in user-defined categories -psdVal("Bluegill",units="in",addLens=7) +# Demonstrate adding in user-defined categories +psdVal("Bluegill",units="in",addLens=7) #> substock stock quality 7 preferred memorable trophy #> 0 3 6 7 8 10 12 -psdVal("Bluegill",units="in",addLens=7,addNames="MinLen") +psdVal("Bluegill",units="in",addLens=7,addNames="MinLen") #> substock stock quality MinLen preferred memorable trophy #> 0 3 6 7 8 10 12 -psdVal("Bluegill",units="in",addLens=c(7,9),addNames=c("MinSlot","MaxSlot")) +psdVal("Bluegill",units="in",addLens=c(7,9),addNames=c("MinSlot","MaxSlot")) #> substock stock quality MinSlot preferred MaxSlot memorable trophy #> 0 3 6 7 8 9 10 12 -psdVal("Bluegill",units="in",addLens=c("MinLen"=7)) 
+psdVal("Bluegill",units="in",addLens=c("MinLen"=7)) #> substock stock quality MinLen preferred memorable trophy #> 0 3 6 7 8 10 12 -psdVal("Bluegill",units="in",addLens=c("MinSlot"=7,"MaxSlot"=9)) +psdVal("Bluegill",units="in",addLens=c("MinSlot"=7,"MaxSlot"=9)) #> substock stock quality MinSlot preferred MaxSlot memorable trophy #> 0 3 6 7 8 9 10 12 -psdVal("Bluegill",showJustSource=TRUE) +psdVal("Bluegill",showJustSource=TRUE) #> species source #> 8 Bluegill Gabelhouse (1984a) - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/rSquared.html b/docs/reference/rSquared.html index 9ec72bcc..94bc8944 100644 --- a/docs/reference/rSquared.html +++ b/docs/reference/rSquared.html @@ -1,131 +1,137 @@ -Extract the coefficient of determination from a linear model object. — rSquared • FSAExtract the coefficient of determination from a linear model object. — rSquared • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Extracts the coefficient of determination (i.e., “r-squared”) from a linear model (i.e., lm) object.

    -
    -
    rSquared(object, ...)
    -
    -# S3 method for default
    -rSquared(object, ...)
    -
    -# S3 method for lm
    -rSquared(object, digits = getOption("digits"), percent = FALSE, ...)
    +
    +

    Usage

    +
    rSquared(object, ...)
    +
    +# S3 method for default
    +rSquared(object, ...)
    +
    +# S3 method for lm
    +rSquared(object, digits = getOption("digits"), percent = FALSE, ...)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    An object saved from lm.

    + +
    ...

    Additional arguments for methods.

    + +
    digits

    A single number that is the number of digits to round the returned result to.

    + +
    percent

    A logical that indicates if the result should be returned as a percentage (=TRUE) or as a proportion (=FALSE; default).

    +
    -
    -

    Value

    -

    A numeric, as either a proportion or percentage, that is the coefficient of determination for a linear model.

    +
    +

    Value

    + + +

    A numeric, as either a proportion or percentage, that is the coefficient of determination for a linear model.

    -
    -

    Details

    +
    +

    Details

    This is a convenience function to extract the r.squared part from summary(lm).

    -
    -

    Examples

    -
    lm1 <- lm(mirex~weight, data=Mirex)
    -rSquared(lm1)
    +    
    +

    Examples

    +
    lm1 <- lm(mirex~weight, data=Mirex)
    +rSquared(lm1)
     #> [1] 0.1812022
    -rSquared(lm1,digits=3)
    +rSquared(lm1,digits=3)
     #> [1] 0.181
    -rSquared(lm1,digits=1,percent=TRUE)
    +rSquared(lm1,digits=1,percent=TRUE)
     #> [1] 18.1
    -
    -## rSquared only works with lm objects
    -if (FALSE) {
    -nls1 <- nls(mirex~a*weight^b,data=Mirex,start=list(a=1,b=1))
    -rSquared(nls1)
    -}
    -
    +
    +## rSquared only works with lm objects
    +if (FALSE) {
    +nls1 <- nls(mirex~a*weight^b,data=Mirex,start=list(a=1,b=1))
    +rSquared(nls1)
    +}
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/rcumsum.html b/docs/reference/rcumsum.html index 20cea023..7b6509ef 100644 --- a/docs/reference/rcumsum.html +++ b/docs/reference/rcumsum.html @@ -1,98 +1,102 @@ -Computes the prior to or reverse cumulative sum of a vector. — rcumsum • FSAComputes the prior to or reverse cumulative sum of a vector. — rcumsum • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the prior-to (i.e., the cumulative sum prior to but not including the current value) or the reverse (i.e., the number that large or larger) cumulative sum of a vector. Also works for 1-dimensional tables, matrices, and data.frames, though it is best used with vectors.

    -
    -
    rcumsum(x)
    -
    -pcumsum(x)
    +
    +

    Usage

    +
    rcumsum(x)
    +
    +pcumsum(x)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    a numeric object.

    +
    -
    -

    Value

    -

    A numeric vector that contains the prior-to or reverse cumulative sums.

    +
    +

    Value

    + + +

    A numeric vector that contains the prior-to or reverse cumulative sums.

    -
    -

    Note

    +
    +

    Note

    An NA in the vector causes all returned values at and after the first NA for pcumsum and at and before the last NA for rcumsum to be NA. See the examples.

    -
    -

    See also

    +
    +

    See also

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Simple example
    -cbind(vals=1:10,
    -      cum=cumsum(1:10),
    -      pcum=pcumsum(1:10),
    -      rcum=rcumsum(1:10))
    +    
    +

    Examples

    +
    ## Simple example
    +cbind(vals=1:10,
    +      cum=cumsum(1:10),
    +      pcum=pcumsum(1:10),
    +      rcum=rcumsum(1:10))
     #>       vals cum pcum rcum
     #>  [1,]    1   1    0   55
     #>  [2,]    2   3    1   54
    @@ -104,25 +108,25 @@ 

    Examples

    #> [8,] 8 36 28 27 #> [9,] 9 45 36 19 #> [10,] 10 55 45 10 - -## Example with NA -vals <- c(1,2,NA,3) -cbind(vals, - cum=cumsum(vals), - pcum=pcumsum(vals), - rcum=rcumsum(vals)) + +## Example with NA +vals <- c(1,2,NA,3) +cbind(vals, + cum=cumsum(vals), + pcum=pcumsum(vals), + rcum=rcumsum(vals)) #> vals cum pcum rcum #> [1,] 1 1 0 NA #> [2,] 2 3 1 NA #> [3,] NA NA NA NA #> [4,] 3 NA NA 3 - -## Example with NA -vals <- c(1,2,NA,3,NA,4) -cbind(vals, - cum=cumsum(vals), - pcum=pcumsum(vals), - rcum=rcumsum(vals)) + +## Example with NA +vals <- c(1,2,NA,3,NA,4) +cbind(vals, + cum=cumsum(vals), + pcum=pcumsum(vals), + rcum=rcumsum(vals)) #> vals cum pcum rcum #> [1,] 1 1 0 NA #> [2,] 2 3 1 NA @@ -130,62 +134,58 @@

    Examples

    #> [4,] 3 NA NA NA #> [5,] NA NA NA NA #> [6,] 4 NA NA 4 - -## Example with a matrix -mat <- matrix(c(1,2,3,4,5),nrow=1) -cumsum(mat) + +## Example with a matrix +mat <- matrix(c(1,2,3,4,5),nrow=1) +cumsum(mat) #> [1] 1 3 6 10 15 -pcumsum(mat) +pcumsum(mat) #> [,1] [,2] [,3] [,4] [,5] #> [1,] 0 1 3 6 10 -rcumsum(mat) +rcumsum(mat) #> [1] 15 14 12 9 5 - -## Example with a table (must be 1-d) -df <- sample(1:10,100,replace=TRUE) -tbl <- table(df) -cumsum(tbl) + +## Example with a table (must be 1-d) +df <- sample(1:10,100,replace=TRUE) +tbl <- table(df) +cumsum(tbl) #> 1 2 3 4 5 6 7 8 9 10 #> 12 23 34 43 51 59 65 78 87 100 -pcumsum(tbl) +pcumsum(tbl) #> df #> 1 2 3 4 5 6 7 8 9 10 #> 0 12 23 34 43 51 59 65 78 87 -rcumsum(tbl) +rcumsum(tbl) #> 1 2 3 4 5 6 7 8 9 10 #> 100 88 77 66 57 49 41 35 22 13 - -## Example with a data.frame (must be 1-d) -df <- sample(1:10,100,replace=TRUE) -tbl <- as.data.frame(table(df))[,-1] -cumsum(tbl) + +## Example with a data.frame (must be 1-d) +df <- sample(1:10,100,replace=TRUE) +tbl <- as.data.frame(table(df))[,-1] +cumsum(tbl) #> [1] 4 17 26 38 47 54 73 82 93 100 -pcumsum(tbl) +pcumsum(tbl) #> [1] 0 4 17 26 38 47 54 73 82 93 -rcumsum(tbl) +rcumsum(tbl) #> [1] 100 96 83 74 62 53 46 27 18 7
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/removal.html b/docs/reference/removal.html index c046c8dd..02860fb7 100644 --- a/docs/reference/removal.html +++ b/docs/reference/removal.html @@ -1,142 +1,178 @@ -Population estimates for k-, 3-, or 2-pass removal data. — removal • FSAPopulation estimates for k-, 3-, or 2-pass removal data. — removal • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes estimates, with confidence intervals, of the population size and probability of capture from the number of fish removed in k-, 3-, or 2-passes in a closed population.

    -
    -
    removal(
    -  catch,
    -  method = c("CarleStrub", "Zippin", "Seber3", "Seber2", "RobsonRegier2", "Moran",
    -    "Schnute", "Burnham"),
    -  alpha = 1,
    -  beta = 1,
    -  CS.se = c("Zippin", "alternative"),
    -  conf.level = 0.95,
    -  just.ests = FALSE,
    -  Tmult = 3,
    -  CIMicroFish = FALSE
    -)
    -
    -# S3 method for removal
    -summary(
    -  object,
    -  parm = c("No", "p", "p1"),
    -  digits = getOption("digits"),
    -  verbose = FALSE,
    -  ...
    -)
    -
    -# S3 method for removal
    -confint(
    -  object,
    -  parm = c("No", "p"),
    -  level = conf.level,
    -  conf.level = NULL,
    -  digits = getOption("digits"),
    -  verbose = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    removal(
    +  catch,
    +  method = c("CarleStrub", "Zippin", "Seber3", "Seber2", "RobsonRegier2", "Moran",
    +    "Schnute", "Burnham"),
    +  alpha = 1,
    +  beta = 1,
    +  CS.se = c("Zippin", "alternative"),
    +  conf.level = 0.95,
    +  just.ests = FALSE,
    +  Tmult = 3,
    +  CIMicroFish = FALSE
    +)
    +
    +# S3 method for removal
    +summary(
    +  object,
    +  parm = c("No", "p", "p1"),
    +  digits = getOption("digits"),
    +  verbose = FALSE,
    +  ...
    +)
    +
    +# S3 method for removal
    +confint(
    +  object,
    +  parm = c("No", "p"),
    +  level = conf.level,
    +  conf.level = NULL,
    +  digits = getOption("digits"),
    +  verbose = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    catch

    A numerical vector of catch at each pass.

    + +
    method

    A single string that identifies the removal method to use. See details.

    + +
    alpha

    A single numeric value for the alpha parameter in the CarleStrub method (default is 1).

    + +
    beta

    A single numeric value for the beta parameter in the CarleStrub method (default is 1).

    + +
    CS.se

    A single string that identifies whether the SE in the CarleStrub method should be computed according to Seber or Zippin.

    + +
    conf.level

    A single number representing the level of confidence to use for constructing confidence intervals. This is sent in the main removal function rather than confint.

    + +
    just.ests

    A logical that indicates whether just the estimates (=TRUE) or the return list (=FALSE; default; see below) is returned.

    + +
    Tmult

    A single numeric that will be multiplied by the total catch in all samples to set the upper value for the range of population sizes when minimizing the log-likelihood and creating confidence intervals for the Moran and Schnute methods. Large values are much slower to compute, but values that are too low may result in missing the best estimate. A warning is issued if too low of a value is suspected.

    + +
    CIMicroFish

    A logical that indicates whether the t value used to calculate confidence intervals when method="Burnham" should be rounded to two or three decimals and whether the confidence intervals for No should be rounded to whole numbers as done in MicroFish 3.0. The default (=FALSE) is to NOT round the t values or No confidence interval. This option is provided only so that results will exactly match MicroFish results (see testing).

    + +
    object

    An object saved from removal().

    + +
    parm

    A specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered.

    + +
    digits

    A single numeric that controls the number of decimals in the output from summary and confint.

    + +
    verbose

    A logical that indicates whether descriptive labels should be printed from summary and if certain warnings are shown with confint.

    + +
    ...

    Additional arguments for methods.

    + +
    level

    Not used, but here for compatibility with generic confint function.

    +
    -
    -

    Value

    -

    A vector that contains the estimates and standard errors for No and p if just.ests=TRUE or (default) a list with at least the following items:

    • catch The original vector of observed catches.

    • +
      +

      Value

      + + +

      A vector that contains the estimates and standard errors for No and p if just.ests=TRUE or (default) a list with at least the following items:

      +

      +
      • catch The original vector of observed catches.

      • method The method used (provided by the user).

      • lbl A descriptive label for the method used.

      • est A matrix that contains the estimates and standard errors for No and p.

      • -

      In addition, if the Moran or Schnute methods are used the list will also contain

      • min.nlogLH The minimum value of the negative log-likelihood function.

      • +

      In addition, if the Moran or Schnute methods are used the list will also contain

      +

      +
      • min.nlogLH The minimum value of the negative log-likelihood function.

      • Tmult The Tmult value sent by the user.

      -
      -

      Details

      +
      +

      Details

      The main function computes the estimates and associated standard errors, if possible, for the initial population size, No, and probability of capture, p, for eight methods chosen with method=. The possible methods are:

      • method="CarleStrub": The general weighted k-pass estimator proposed by Carle and Strub (1978). This function iteratively solves for No in equation 7 of Carle and Strub (1978).

      • method="Zippin": The general k-pass estimator generally attributed to Zippin. This function iteratively solves for No in bias corrected version of equation 3 (page 622) of Carle and Strub (1978). These results are not yet trustworthy (see Testing section below).

      • method="Seber3": The special case for k=3 estimator shown in equation 7.24 of Seber(2002).

      • @@ -149,8 +185,8 @@

        Details

        Confidence intervals for the next two methods use likelihood ratio theory as described in Schnute (1983) and are only produced for the No parameter. Standard errors are not produced with the Moran or Schnute methods.

        Confidence intervals for the last method are computed as per Ken Burnham's instructions for the Burnham Method (Jack Van Deventer, personal communication). Specifically, they are calculated with the t-statistic and No-1 degrees of freedom. Please note that the MicroFish software rounds the t-statistic before it calculates the confidence intervals about No and p. If you need the confidence interals produced by FSA::removal to duplicate MicroFish, please use CIMicroFish=TRUE.

      -
      -

      testing

      +
      +

      testing

      The Carle-Strub method matches the examples in Carle and Strub (1978) for No, p, and the variance of No. The Carle-Strub estimates of No and p match the examples in Cowx (1983) but the SE of No does not. The Carle-Strub estimates of No match the results (for estimates that they did not reject) from Jones and Stockwell (1995) to within 1 individual in most instances and within 1% for all other instances (e.g., off by 3 individuals when the estimate was 930 individuals).

      The Seber3 results for No match the results in Cowx (1983).

      The Seber2 results for No, p, and the SE of No match the results in example 7.4 of Seber (2002) and in Cowx (1983).

      @@ -159,13 +195,13 @@

      testing

      The Moran and Schnute methods match the examples in Schnute (1983) perfectly for all point estimates and within 0.1 units for all confidence intervals.

      The Burnham method was tested against the free (gratis) Demo Version of MicroFish 3.0. Powell Wheeler used R to simulate 100, three-pass removal samples with capture probabilities between 0 and 1 and population sizes <= 1000. The Burnham method implemented here exactly matched MicroFish in all 100 trials for No and p. In addition, the CIs for No exactly matched all 100 trials when CIMicroFish=TRUE. Powell was not able to check the CIs for p because the MicroFish 'Quick Population Estimate' does not report them.

      -
      -

      IFAR Chapter

      +
      +

      IFAR Chapter

      10-Abundance from Depletion Data.

      -
      -

      References

      -

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      +
      +

      References

      +

      Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

      Carle, F.L. and M.R. Strub. 1978. A new method for estimating population size from removal data. Biometrics, 34:621-630.

      Cowx, I.G. 1983. Review of the methods for estimating fish population size from survey removal data. Fisheries Management, 14:67-82.

      Moran, P.A.P. 1951. A mathematical theory of animal trapping. Biometrika 38:307-311.

      @@ -176,150 +212,150 @@

      References

      Van Deventer, J.S. 1989. Microcomputer Software System for Generating Population Statistics from Electrofishing Data--User's Guide for MicroFish 3.0. USDA Forest Service, General Technical Report INT-254. 29 p. [Was (is?) from https://relicensing.pcwa.net/documents/Library/PCWA-L

      Van Deventer, J.S., and W.S. Platts. 1983. Sampling and estimating fish populations from streams. Transactions of the 48th North American Wildlife and Natural Resource Conference. pp. 349-354.

      -
      -

      See also

      +
      +

      See also

      See depletion for related functionality.

      -
      -

      Author

      -

      Derek H. Ogle, derek@derekogle.com

      +
      +

      Author

      +

      Derek H. Ogle, DerekOgle51@gmail.com

      A. Powell Wheeler, powell.wheeler@gmail.com

      -
      -

      Examples

      -
      ## First example -- 3 passes
      -ct3 <- c(77,50,37)
      -
      -# Carle Strub (default) method
      -p1 <- removal(ct3)
      -summary(p1)
      +    
      +

      Examples

      +
      ## First example -- 3 passes
      +ct3 <- c(77,50,37)
      +
      +# Carle Strub (default) method
      +p1 <- removal(ct3)
      +summary(p1)
       #>       Estimate Std. Error
       #> No 233.0000000 31.3578504
       #> p    0.3313131  0.0666816
      -summary(p1,verbose=TRUE)
      +summary(p1,verbose=TRUE)
       #> The Carle & Strub (1978) K-Pass Removal Method was used.
       #>       Estimate Std. Error
       #> No 233.0000000 31.3578504
       #> p    0.3313131  0.0666816
      -summary(p1,parm="No")
      +summary(p1,parm="No")
       #>    Estimate Std. Error
       #> No      233   31.35785
      -summary(p1,parm="p")
      +summary(p1,parm="p")
       #>    Estimate Std. Error
       #> p 0.3313131  0.0666816
      -confint(p1)
      +confint(p1)
       #>        95% LCI     95% UCI
       #> No 171.5397426 294.4602574
       #> p    0.2006195   0.4620067
      -confint(p1,parm="No")
      +confint(p1,parm="No")
       #>     95% LCI  95% UCI
       #> No 171.5397 294.4603
      -confint(p1,parm="p")
      +confint(p1,parm="p")
       #>     95% LCI   95% UCI
       #> p 0.2006195 0.4620067
      -
      -# Moran method
      -p2 <- removal(ct3,method="Moran")
      -summary(p2,verbose=TRUE)
      +
      +# Moran method
      +p2 <- removal(ct3,method="Moran")
      +summary(p2,verbose=TRUE)
       #> The Moran (1951) K-Pass Removal Method was used (SEs not computed).
       #>       Estimate
       #> No 237.5965440
       #> p    0.3223336
      -confint(p2,verbose=TRUE)
      +confint(p2,verbose=TRUE)
       #>    95% LCI 95% UCI
       #> No   194.7   370.9
      -#'
      -# Schnute method
      -p3 <- removal(ct3,method="Schnute")
      -summary(p3,verbose=TRUE)
      +#'
      +# Schnute method
      +p3 <- removal(ct3,method="Schnute")
      +summary(p3,verbose=TRUE)
       #> The Schnute (1983) K-Pass Removal Method w/ Non-constant Initial Catchability was used (SEs not computed).
       #>       Estimate
       #> No 245.0955550
       #> p    0.3039927
       #> p1   0.3141632
      -confint(p3,verbose=TRUE)
      +confint(p3,verbose=TRUE)
       #> An upper confidence value for 'No' cannot be determined.
       #>    95% LCI 95% UCI
       #> No   183.9     Inf
      -
      -# Burnham method
      -p4 <- removal(ct3,method="Burnham")
      -summary(p4)
      +
      +# Burnham method
      +p4 <- removal(ct3,method="Burnham")
      +summary(p4)
       #>       Estimate Std. Error
       #> No 238.0000000 33.8404319
       #> p    0.3215686  0.0673948
      -summary(p4,verbose=TRUE)
      +summary(p4,verbose=TRUE)
       #> The Burnham K-Pass Removal Method (Van Deventer and Platts 1983) was used.
       #>       Estimate Std. Error
       #> No 238.0000000 33.8404319
       #> p    0.3215686  0.0673948
      -summary(p4,parm="No")
      +summary(p4,parm="No")
       #>    Estimate Std. Error
       #> No      238   33.84043
      -summary(p4,parm="p")
      +summary(p4,parm="p")
       #>    Estimate Std. Error
       #> p 0.3215686  0.0673948
      -confint(p4)
      +confint(p4)
       #>        95% LCI     95% UCI
       #> No 171.3335366 304.6664634
       #> p    0.1887992   0.4543381
      -confint(p4,parm="No")
      +confint(p4,parm="No")
       #>     95% LCI  95% UCI
       #> No 171.3335 304.6665
      -confint(p4,parm="p")
      +confint(p4,parm="p")
       #>     95% LCI   95% UCI
       #> p 0.1887992 0.4543381
      -## Second example -- 2 passes
      -ct2 <- c(77,37)
      -
      -# Seber method
      -p4 <- removal(ct2,method="Seber2")
      -summary(p4,verbose=TRUE)
      +## Second example -- 2 passes
      +ct2 <- c(77,37)
      +
      +# Seber method
      +p4 <- removal(ct2,method="Seber2")
      +summary(p4,verbose=TRUE)
       #> The Seber (2002) 2-Pass Removal Method was used.
       #>       Estimate Std. Error
       #> No 148.2250000 19.0118725
       #> p    0.5194805  0.0961208
      -confint(p4)
      +confint(p4)
       #>        95% LCI     95% UCI
       #> No 110.9624147 185.4875853
       #> p    0.3310873   0.7078737
      -
      -
      -### Test if catchability differs between first sample and the other samples
      -# chi-square test statistic from  negative log-likelihoods
      -#   from Moran and Schnute fits (from above)
      -chi2.val <- 2*(p2$min.nlogLH-p3$min.nlogLH)
      -# p-value ... no significant difference
      -pchisq(chi2.val,df=1,lower.tail=FALSE)
      +
      +
      +### Test if catchability differs between first sample and the other samples
      +# chi-square test statistic from  negative log-likelihoods
      +#   from Moran and Schnute fits (from above)
      +chi2.val <- 2*(p2$min.nlogLH-p3$min.nlogLH)
      +# p-value ... no significant difference
      +pchisq(chi2.val,df=1,lower.tail=FALSE)
       #> [1] 0.8882765
      -
      -# Another LRT example ... sample 1 from Schnute (1983)
      -ct4 <- c(45,11,18,8)
      -p2a <- removal(ct4,method="Moran")
      -p3a <- removal(ct4,method="Schnute")
      -chi2.val <- 2*(p2a$min.nlogLH-p3a$min.nlogLH)  # 4.74 in Schnute(1983)
      -pchisq(chi2.val,df=1,lower.tail=FALSE)         # significant difference (catchability differs)
      +
      +# Another LRT example ... sample 1 from Schnute (1983)
      +ct4 <- c(45,11,18,8)
      +p2a <- removal(ct4,method="Moran")
      +p3a <- removal(ct4,method="Schnute")
      +chi2.val <- 2*(p2a$min.nlogLH-p3a$min.nlogLH)  # 4.74 in Schnute(1983)
      +pchisq(chi2.val,df=1,lower.tail=FALSE)         # significant difference (catchability differs)
       #> [1] 0.02955309
      -summary(p3a)
      +summary(p3a)
       #>       Estimate
       #> No 123.5879686
       #> p    0.1890032
       #> p1   0.3641131
      -
      -
      -### Using lapply() to use removal() on many different groups
      -###   with the removals in a single variable ("long format")
      -## create a dummy data frame
      -lake <- factor(rep(c("Ash Tree","Bark","Clay"),each=5))
      -year <- factor(rep(c("2010","2011","2010","2011","2010","2011"),times=c(2,3,3,2,2,3)))
      -pass <- factor(c(1,2,1,2,3,1,2,3,1,2,1,2,1,2,3))
      -catch <- c(57,34,65,34,12,54,26,9,54,27,67,34,68,35,12)
      -d <- data.frame(lake,year,pass,catch)
      -
      -## create a variable that indicates each different group
      -d$group <- with(d,interaction(lake,year))
      -d
      +
      +
      +### Using lapply() to use removal() on many different groups
      +###   with the removals in a single variable ("long format")
      +## create a dummy data frame
      +lake <- factor(rep(c("Ash Tree","Bark","Clay"),each=5))
      +year <- factor(rep(c("2010","2011","2010","2011","2010","2011"),times=c(2,3,3,2,2,3)))
      +pass <- factor(c(1,2,1,2,3,1,2,3,1,2,1,2,1,2,3))
      +catch <- c(57,34,65,34,12,54,26,9,54,27,67,34,68,35,12)
      +d <- data.frame(lake,year,pass,catch)
      +
      +## create a variable that indicates each different group
      +d$group <- with(d,interaction(lake,year))
      +d
       #>        lake year pass catch         group
       #> 1  Ash Tree 2010    1    57 Ash Tree.2010
       #> 2  Ash Tree 2010    2    34 Ash Tree.2010
      @@ -336,19 +372,19 @@ 

      Examples

      #> 13 Clay 2011 1 68 Clay.2011 #> 14 Clay 2011 2 35 Clay.2011 #> 15 Clay 2011 3 12 Clay.2011 -## split the catch by the different groups (creates a list of catch vectors) -ds <- split(d$catch,d$group) -## apply removal() to each catch vector (i.e., different group) -res <- lapply(ds,removal,just.ests=TRUE) -res <- data.frame(t(data.frame(res,check.names=FALSE))) -## get rownames from above and split into separate columns -nms <- t(data.frame(strsplit(rownames(res),"\\."))) -attr(nms,"dimnames") <- NULL -fnl <- data.frame(nms,res) -## put names together with values -rownames(fnl) <- NULL -colnames(fnl)[1:2] <- c("Lake","Year") -fnl +## split the catch by the different groups (creates a list of catch vectors) +ds <- split(d$catch,d$group) +## apply removal() to each catch vector (i.e., different group) +res <- lapply(ds,removal,just.ests=TRUE) +res <- data.frame(t(data.frame(res,check.names=FALSE))) +## get rownames from above and split into separate columns +nms <- t(data.frame(strsplit(rownames(res),"\\."))) +attr(nms,"dimnames") <- NULL +fnl <- data.frame(nms,res) +## put names together with values +rownames(fnl) <- NULL +colnames(fnl)[1:2] <- c("Lake","Year") +fnl #> Lake Year No No.se No.LCI No.UCI p p.se p.LCI #> 1 Ash Tree 2010 130 26.108558 78.82817 181.1718 0.4482759 0.12120594 0.2107166 #> 2 Bark 2010 95 4.247687 86.67469 103.3253 0.5894040 0.06418406 0.4636055 @@ -363,24 +399,24 @@

      Examples

      #> 4 0.6757103 #> 5 0.7518961 #> 6 0.6785262 - - -### Using apply() to use removal() on many different groups -### with the removals in several variables ("wide format") -## create a dummy data frame (just reshaped from above as -## an example; -5 to ignore the group variable from above) -d1 <- reshape(d[,-5],timevar="pass",idvar=c("lake","year"),direction="wide") -## apply restore() to each row of only the catch data -res1 <- apply(d1[,3:5],MARGIN=1,FUN=removal,method="CarleStrub",just.ests=TRUE) + + +### Using apply() to use removal() on many different groups +### with the removals in several variables ("wide format") +## create a dummy data frame (just reshaped from above as +## an example; -5 to ignore the group variable from above) +d1 <- reshape(d[,-5],timevar="pass",idvar=c("lake","year"),direction="wide") +## apply restore() to each row of only the catch data +res1 <- apply(d1[,3:5],MARGIN=1,FUN=removal,method="CarleStrub",just.ests=TRUE) #> Warning: 'NA's removed from 'catch' to continue. #> Warning: 'NA's removed from 'catch' to continue. #> Warning: 'NA's removed from 'catch' to continue. -res1 <- data.frame(t(data.frame(res1,check.names=FALSE))) -## add the grouping information to the results -fnl1 <- data.frame(d1[,1:2],res1) -## put names together with values -rownames(fnl1) <- NULL -fnl1 +res1 <- data.frame(t(data.frame(res1,check.names=FALSE))) +## add the grouping information to the results +fnl1 <- data.frame(d1[,1:2],res1) +## put names together with values +rownames(fnl1) <- NULL +fnl1 #> lake year No No.se No.LCI No.UCI p p.se p.LCI #> 1 Ash Tree 2010 130 26.108558 78.82817 181.1718 0.4482759 0.12120594 0.2107166 #> 2 Ash Tree 2011 121 5.771511 109.68805 132.3120 0.5577889 0.06016508 0.4398676 @@ -395,29 +431,25 @@

      Examples

      #> 4 0.7518961 #> 5 0.7226831 #> 6 0.6785262 - +
      -
      - -
      +
    -
    +
    - diff --git a/docs/reference/se.html b/docs/reference/se.html index 35e0067b..4261ed3b 100644 --- a/docs/reference/se.html +++ b/docs/reference/se.html @@ -1,130 +1,132 @@ -Computes standard error of the mean. — se • FSAComputes standard error of the mean. — se • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Computes the standard error of the mean (i.e., standard deviation divided by the square root of the sample size).

    -
    -
    se(x, na.rm = TRUE)
    +
    +

    Usage

    +
    se(x, na.rm = TRUE)
    -
    -

    Arguments

    +
    +

    Arguments

    x

    A numeric vector.

    + +
    na.rm

    A logical that indicates whether missing values should be removed before computing the standard error.

    +
    -
    -

    Value

    -

    A single numeric that is the standard error of the mean of x.

    +
    +

    Value

    + + +

    A single numeric that is the standard error of the mean of x.

    -
    -

    See also

    +
    +

    See also

    See se in sciplot for similar functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    x <- 1:20
    -sd(x)/sqrt(length(x))
    +    
    +

    Examples

    +
    x <- 1:20
    +sd(x)/sqrt(length(x))
     #> [1] 1.322876
    -se(x)
    +se(x)
     #> [1] 1.322876
    -
    -# all return NA if missing values are not removed
    -x2 <- c(x,NA)
    -sd(x2)/sqrt(length(x2))
    +
    +# all return NA if missing values are not removed
    +x2 <- c(x,NA)
    +sd(x2)/sqrt(length(x2))
     #> [1] NA
    -
    -# Better if missing values are removed
    -se(x2,na.rm=FALSE)
    +
    +# Better if missing values are removed
    +se(x2,na.rm=FALSE)
     #> [1] NA
    -sd(x2,na.rm=TRUE)/sqrt(length(x2[complete.cases(x2)]))
    +sd(x2,na.rm=TRUE)/sqrt(length(x2[complete.cases(x2)]))
     #> [1] 1.322876
    -se(x2)
    +se(x2)
     #> [1] 1.322876
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/srStarts.html b/docs/reference/srStarts.html index c09325a8..e321d63f 100644 --- a/docs/reference/srStarts.html +++ b/docs/reference/srStarts.html @@ -1,129 +1,157 @@ -Finds reasonable starting values for parameters in specific parameterizations of common stock-recruitment models. — srStarts • FSAFinds reasonable starting values for parameters in specific parameterizations of common stock-recruitment models. — srStarts • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Finds reasonable starting values for parameters in specific parameterizations of the “Beverton-Holt”, “Ricker”, “Shepherd”, or “Saila-Lorda” stock-recruitment models. Use srFunShow() to see the equations of each model.

    -
    -
    srStarts(
    -  formula,
    -  data = NULL,
    -  type = c("BevertonHolt", "Ricker", "Shepherd", "SailaLorda", "independence"),
    -  param = 1,
    -  fixed = NULL,
    -  plot = FALSE,
    -  col.mdl = "gray70",
    -  lwd.mdl = 3,
    -  lty.mdl = 1,
    -  cex.main = 0.9,
    -  col.main = "red",
    -  dynamicPlot = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    srStarts(
    +  formula,
    +  data = NULL,
    +  type = c("BevertonHolt", "Ricker", "Shepherd", "SailaLorda", "independence"),
    +  param = 1,
    +  fixed = NULL,
    +  plot = FALSE,
    +  col.mdl = "gray70",
    +  lwd.mdl = 3,
    +  lty.mdl = 1,
    +  cex.main = 0.9,
    +  col.main = "red",
    +  dynamicPlot = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula of the form Recruits~Stock.

    + +
    data

    A data frame in which Recruits and Stock are found.

    + +
    type

    A string that indicates the type of the stock-recruitment model. Must be one of "BevertonHolt", "Ricker", "Shepherd", or "SailaLorda".

    + +
    param

    A numeric that indicates the parameterization of the stock-recruitment model type. This is ignored if type="Shepherd" or type="SailaLorda"

    + +
    fixed

    A named list that contains user-defined rather than automatically generated (i.e., fixed) starting values for one or more parameters. See details.

    + +
    plot

    A logical that indicates whether or not a plot of the data with the model fit at the starting values superimposed is created.

    + +
    col.mdl

    A color for the model when plot=TRUE.

    + +
    lwd.mdl

    A line width for the model when plot=TRUE.

    + +
    lty.mdl

    A line type for the model when plot=TRUE.

    + +
    cex.main

    A character expansion value for the main title when plot=TRUE.

    + +
    col.main

    A color for the main title when plot=TRUE.

    + +
    dynamicPlot

    DEPRECATED.

    + +
    ...

    Further arguments passed to the methods.

    +
    -
    -

    Value

    -

    A list that contains reasonable starting values. Note that the parameters will be listed in the same order and with the same names as listed in srFuns.

    +
    +

    Value

    + + +

    A list that contains reasonable starting values. Note that the parameters will be listed in the same order and with the same names as listed in srFuns.

    -
    -

    Details

    +
    +

    Details

    This function attempts to find reasonable starting values for a variety of parameterizations of the “Beverton-Holt”, “Ricker”, “Shepherd”, or “Saila-Lorda” stock-recruitment models. There is no guarantee that these starting values are the ‘best’ starting values. One should use them with caution and should perform sensitivity analyses to determine the impact of different starting values on the final model results.

    Starting values for the first parameterization of the Beverton-Holt model were derived by linearizing the function (inverting both sides and simplifying), fitting a linear model to the observed data, and extracting parameter values from the corresponding linear model parameters. Starting values for the other parameterizations of the Beverton-Holt model were derived from known relationships between the parameters of each parameterization and the first parameterization. If the computed starting value for the Rp parameter was larger than the largest observed recruitment value, then the starting value for Rp was set to the largest observed recruitment value.

    Starting values for the Shepherd function were the same as those for the first parameterization of the Beverton-Holt function with the addition that c=1.

    Starting values for the Ricker parameterizations followed the same general procedure as described for the Beverton-Holt parameterizations. If the computed starting value for atilde was less than zero then the starting value was set to 0.00001.

    Starting values for the Saila-Lorda function were the same as those for the first parameterization of the Ricker function with the addition that c=1.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    13-Recruitment.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    Beverton, R.J.H. and S.J. Holt. 1957. On the dynamics of exploited fish populations, Fisheries Investigations (Series 2), volume 19. United Kingdom Ministry of Agriculture and Fisheries, 533 pp.

    Iles, T.C. 1994. A review of stock-recruitment relationships with reference to flatfish populations. Netherlands Journal of Sea Research 32:399-420.

    Quinn II, T.J. and R.B. Deriso. 1999. Quantitative Fish Dynamics. Oxford University Press.

    @@ -131,68 +159,68 @@

    References

    Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.]

    Shepherd, J. 1982. A versatile new stock-recruitment relationship for fisheries and construction of sustainable yield curves. Journal du Conseil International pour l'Exploration de la Mar 40:67-75.

    -
    -

    See also

    +
    +

    See also

    See srFunShow and srFuns for related functionality. See nlsTracePlot for help troubleshooting nonlinear models that don't converge.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Simple Examples
    -srStarts(recruits~stock,data=CodNorwegian)
    +    
    +

    Examples

    +
    ## Simple Examples
    +srStarts(recruits~stock,data=CodNorwegian)
     #> $a
     #> [1] 6.040326
     #> 
     #> $b
     #> [1] 0.08789475
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,param=2)
    +srStarts(recruits~stock,data=CodNorwegian,param=2)
     #> $a
     #> [1] 6.040326
     #> 
     #> $Rp
     #> [1] 68.72227
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,param=3)
    +srStarts(recruits~stock,data=CodNorwegian,param=3)
     #> $a
     #> [1] 0.165554
     #> 
     #> $b
     #> [1] 0.01455132
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,param=4)
    +srStarts(recruits~stock,data=CodNorwegian,param=4)
     #> $a
     #> [1] 0.165554
     #> 
     #> $Rp
     #> [1] 68.72227
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,type="Ricker")
    +srStarts(recruits~stock,data=CodNorwegian,type="Ricker")
     #> $a
     #> [1] 5.919256
     #> 
     #> $b
     #> [1] 0.01836117
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,type="Ricker",param=2)
    +srStarts(recruits~stock,data=CodNorwegian,type="Ricker",param=2)
     #> $a
     #> [1] 1.778211
     #> 
     #> $b
     #> [1] 0.01836117
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,type="Ricker",param=3)
    +srStarts(recruits~stock,data=CodNorwegian,type="Ricker",param=3)
     #> $a
     #> [1] 5.919256
     #> 
     #> $Rp
     #> [1] 118.5966
     #> 
    -srStarts(recruits~stock,data=CodNorwegian,type="Shepherd")
    +srStarts(recruits~stock,data=CodNorwegian,type="Shepherd")
     #> $a
     #> [1] 6.040326
     #> 
    @@ -202,7 +230,7 @@ 

    Examples

    #> $c #> [1] 1 #> -srStarts(recruits~stock,data=CodNorwegian,type="SailaLorda") +srStarts(recruits~stock,data=CodNorwegian,type="SailaLorda") #> $a #> [1] 5.919256 #> @@ -212,13 +240,13 @@

    Examples

    #> $c #> [1] 1 #> -srStarts(recruits~stock,data=CodNorwegian,type="independence") +srStarts(recruits~stock,data=CodNorwegian,type="independence") #> $a #> [1] 0.630291 #> - -## Simple Examples with a Plot -srStarts(recruits~stock,data=CodNorwegian,type="Ricker",plot=TRUE) + +## Simple Examples with a Plot +srStarts(recruits~stock,data=CodNorwegian,type="Ricker",plot=TRUE) #> $a #> [1] 5.919256 @@ -226,7 +254,7 @@

    Examples

    #> $b #> [1] 0.01836117 #> -srStarts(recruits~stock,data=CodNorwegian,type="BevertonHolt",plot=TRUE) +srStarts(recruits~stock,data=CodNorwegian,type="BevertonHolt",plot=TRUE) #> $a #> [1] 6.040326 @@ -234,7 +262,7 @@

    Examples

    #> $b #> [1] 0.08789475 #> -srStarts(recruits~stock,data=CodNorwegian,type="Shepherd",plot=TRUE) +srStarts(recruits~stock,data=CodNorwegian,type="Shepherd",plot=TRUE) #> $a #> [1] 6.040326 @@ -245,7 +273,7 @@

    Examples

    #> $c #> [1] 1 #> -srStarts(recruits~stock,data=CodNorwegian,type="SailaLorda",plot=TRUE) +srStarts(recruits~stock,data=CodNorwegian,type="SailaLorda",plot=TRUE) #> $a #> [1] 5.919256 @@ -256,36 +284,32 @@

    Examples

    #> $c #> [1] 1 #> -srStarts(recruits~stock,data=CodNorwegian,type="independence",plot=TRUE) +srStarts(recruits~stock,data=CodNorwegian,type="independence",plot=TRUE) #> $a #> [1] 0.630291 #> - -## See examples in srFuns() for use of srStarts() when fitting stock-recruit models - + +## See examples in srFuns() for use of srStarts() when fitting stock-recruit models +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/sumTable.html b/docs/reference/sumTable.html index 1ff1b922..8aa45e62 100644 --- a/docs/reference/sumTable.html +++ b/docs/reference/sumTable.html @@ -1,110 +1,122 @@ -Creates a one- or two-way table of summary statistics. — sumTable • FSACreates a one- or two-way table of summary statistics. — sumTable • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Creates a one- or two-way table of summary statistics for a quantitative variable.

    -
    -
    sumTable(formula, ...)
    -
    -# S3 method for formula
    -sumTable(formula, data = NULL, FUN = mean, digits = getOption("digits"), ...)
    +
    +

    Usage

    +
    sumTable(formula, ...)
    +
    +# S3 method for formula
    +sumTable(formula, data = NULL, FUN = mean, digits = getOption("digits"), ...)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula with a quantitative variable on the left-hand-side and one or two factor variables on the right-hand-side. See details.

    + +
    ...

    Other arguments to pass through to FUN.

    + +
    data

    An optional data frame that contains the variables in formula.

    + +
    FUN

    A scalar function that identifies the summary statistics. Applied to the quantitative variable for all data subsets identified by the combination of the factor(s). Defaults to mean.

    + +
    digits

    A single numeric that indicates the number of digits to be used for the result.

    +
    -
    -

    Value

    -

    A one-way array of values if only one factor variable is supplied on the right-hand-side of formula. A two-way matrix of values if two factor variables are supplied on the right-hand-side of formula. These are the same classes of objects returned by tapply.

    +
    +

    Value

    + + +

    A one-way array of values if only one factor variable is supplied on the right-hand-side of formula. A two-way matrix of values if two factor variables are supplied on the right-hand-side of formula. These are the same classes of objects returned by tapply.

    -
    -

    Details

    +
    +

    Details

    The formula must be of the form quantitative~factor or quantitative~factor*factor2 where quantitative is the quantitative variable to construct the summaries for and factor and factor2 are factor variables that contain the levels for which separate summaries should be constructed. If the variables on the right-hand-side are not factors, then they will be coerced to be factors and a warning will be issued.

    This function is largely a wrapper to tapply(), but only works for one quantitative variable on the left-hand-side and one or two factor variables on the right-hand-side. Consider using tapply for situations with more factors on the right-hand-side.

    -
    -

    See also

    +
    +

    See also

    See tapply for a more general implementation. See Summarize for a similar computation when only one factor variable is given.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## The same examples as in the old aggregate.table in gdata package
    -## but data in data.frame to illustrate formula notation
    -d <- data.frame(g1=sample(letters[1:5], 1000, replace=TRUE),
    -                g2=sample(LETTERS[1:3], 1000, replace=TRUE),
    -                dat=rnorm(1000))
    -
    -sumTable(dat~g1*g2,data=d,FUN=length)       # get sample size
    +    
    +

    Examples

    +
    ## The same examples as in the old aggregate.table in gdata package
    +## but data in data.frame to illustrate formula notation
    +d <- data.frame(g1=sample(letters[1:5], 1000, replace=TRUE),
    +                g2=sample(LETTERS[1:3], 1000, replace=TRUE),
    +                dat=rnorm(1000))
    +
    +sumTable(dat~g1*g2,data=d,FUN=length)       # get sample size
     #> Warning: First RHS variable was converted to a factor.
     #> Warning: Second RHS variable was converted to a factor.
     #>    A  B  C
    @@ -113,7 +125,7 @@ 

    Examples

    #> c 61 83 75 #> d 75 57 50 #> e 57 79 70 -sumTable(dat~g1*g2,data=d,FUN=validn) # get sample size (better way) +sumTable(dat~g1*g2,data=d,FUN=validn) # get sample size (better way) #> Warning: First RHS variable was converted to a factor. #> Warning: Second RHS variable was converted to a factor. #> A B C @@ -122,7 +134,7 @@

    Examples

    #> c 61 83 75 #> d 75 57 50 #> e 57 79 70 -sumTable(dat~g1*g2,data=d,FUN=mean) # get mean +sumTable(dat~g1*g2,data=d,FUN=mean) # get mean #> Warning: First RHS variable was converted to a factor. #> Warning: Second RHS variable was converted to a factor. #> A B C @@ -131,7 +143,7 @@

    Examples

    #> c 0.2214661 0.0308539 -0.0028557 #> d -0.0024910 -0.1756349 -0.0269173 #> e -0.1744024 0.0166841 -0.0942160 -sumTable(dat~g1*g2,data=d,FUN=sd) # get sd +sumTable(dat~g1*g2,data=d,FUN=sd) # get sd #> Warning: First RHS variable was converted to a factor. #> Warning: Second RHS variable was converted to a factor. #> A B C @@ -140,7 +152,7 @@

    Examples

    #> c 1.0477833 1.0044162 1.0192641 #> d 0.9593495 1.0464473 0.9969988 #> e 1.0720302 0.9515544 1.0513543 -sumTable(dat~g1*g2,data=d,FUN=sd,digits=1) # show digits= argument +sumTable(dat~g1*g2,data=d,FUN=sd,digits=1) # show digits= argument #> Warning: First RHS variable was converted to a factor. #> Warning: Second RHS variable was converted to a factor. #> A B C @@ -149,50 +161,46 @@

    Examples

    #> c 1.0 1 1.0 #> d 1.0 1 1.0 #> e 1.1 1 1.1 - -## Also demonstrate use in the 1-way example -- but see Summarize() -sumTable(dat~g1,data=d,FUN=validn) + +## Also demonstrate use in the 1-way example -- but see Summarize() +sumTable(dat~g1,data=d,FUN=validn) #> Warning: RHS variable was converted to a factor. #> a b c d e #> 203 190 219 182 206 -sumTable(dat~g1,data=d,FUN=mean) +sumTable(dat~g1,data=d,FUN=mean) #> Warning: RHS variable was converted to a factor. #> a b c d e #> 0.0118431 -0.1674870 0.0724024 -0.0634279 -0.0738739 - -## Example with a missing value (compare to above) -d$dat[1] <- NA -sumTable(dat~g1,data=d,FUN=validn) # note use of validn + +## Example with a missing value (compare to above) +d$dat[1] <- NA +sumTable(dat~g1,data=d,FUN=validn) # note use of validn #> Warning: RHS variable was converted to a factor. #> a b c d e #> 203 190 219 181 206 -sumTable(dat~g1,data=d,FUN=mean,na.rm=TRUE) +sumTable(dat~g1,data=d,FUN=mean,na.rm=TRUE) #> Warning: RHS variable was converted to a factor. #> a b c d e #> 0.0118431 -0.1674870 0.0724024 -0.0663266 -0.0738739 - +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/tictactoe.html b/docs/reference/tictactoe.html index 82a095f4..11dd19b4 100644 --- a/docs/reference/tictactoe.html +++ b/docs/reference/tictactoe.html @@ -1,194 +1,210 @@ -Construct a base tic-tac-toe plot for presenting predator-prey PSD values. — tictactoe • FSAConstruct a base tic-tac-toe plot for presenting predator-prey PSD values. — tictactoe • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Construct a base tic-tac-toe plot for presenting predator-prey PSD values. Predator-prey PSD values are added with plotCI from plotrix.

    -
    -
    tictactoe(
    -  predobj = c(30, 70),
    -  preyobj = c(30, 70),
    -  predlab = "Predator PSD",
    -  preylab = "Prey PSD",
    -  obj.col = "black",
    -  obj.trans = 0.2,
    -  bnd.col = "black",
    -  bnd.lwd = 1,
    -  bnd.lty = 2
    -)
    +
    +

    Usage

    +
    tictactoe(
    +  predobj = c(30, 70),
    +  preyobj = c(30, 70),
    +  predlab = "Predator PSD",
    +  preylab = "Prey PSD",
    +  obj.col = "black",
    +  obj.trans = 0.2,
    +  bnd.col = "black",
    +  bnd.lwd = 1,
    +  bnd.lty = 2
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    predobj

    A vector of length 2 that contains the target objective range for the predator.

    + +
    preyobj

    A vector of length 2 that contains the target objective range for the prey.

    + +
    predlab

    A string representing a label for the x-axis.

    + +
    preylab

    A string representing a label for the y-axis.

    + +
    obj.col

    A string designating a color to which the target objective regions should be shaded.

    + +
    obj.trans

    A numeric (decimal) that indicates the level of transparency for marking the target objective regions.

    + +
    bnd.col

    A string that indicates a color for the boundaries of the target objective regions.

    + +
    bnd.lwd

    A numeric that indicates the line width for the boundaries of the target objective regions.

    + +
    bnd.lty

    A numeric that indicates the line type for the boundaries of the target objective regions.

    +
    -
    -

    Value

    -

    None. However, a graphic is produced.

    +
    +

    Value

    + + +

    None. However, a graphic is produced.

    -
    -

    Details

    +
    +

    Details

    This function simply creates a base tic-tac-toe plot. Observed values, with confidence intervals, are added to this plot with plotCI from plotrix; see examples.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    6-Size Structure.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See psdVal and psdCalc for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Create hypothetical data for plotting one point .. similar to what might come from psdCalc()
    -prey <- c(45.4,30.2,56.8)
    -pred <- c(24.5,10.2,36.7)
    -names(prey) <- names(pred) <- c("Estimate","95% LCI","95% UCI")
    -prey
    +    
    +

    Examples

    +
    ## Create hypothetical data for plotting one point .. similar to what might come from psdCalc()
    +prey <- c(45.4,30.2,56.8)
    +pred <- c(24.5,10.2,36.7)
    +names(prey) <- names(pred) <- c("Estimate","95% LCI","95% UCI")
    +prey
     #> Estimate  95% LCI  95% UCI 
     #>     45.4     30.2     56.8 
    -pred
    +pred
     #> Estimate  95% LCI  95% UCI 
     #>     24.5     10.2     36.7 
    -
    -tictactoe()
    -if (require(plotrix)) {
    -  plotCI(prey[1],pred[1],li=prey[2],ui=prey[3],err="x",pch=16,add=TRUE)
    -  plotCI(prey[1],pred[1],li=pred[2],ui=pred[3],err="y",pch=16,add=TRUE) 
    -}
    +
    +tictactoe()
    +if (require(plotrix)) {
    +  plotCI(prey[1],pred[1],li=prey[2],ui=prey[3],err="x",pch=16,add=TRUE)
    +  plotCI(prey[1],pred[1],li=pred[2],ui=pred[3],err="y",pch=16,add=TRUE) 
    +}
     #> Loading required package: plotrix
     
    -
    -## Create hypothetical data for plotting three points ... similar to what might come from psdCalc()
    -prey <- rbind(c(45.4,30.2,56.8),
    -              c(68.2,56.7,79.4),
    -              c(17.1, 9.5,26.3))
    -pred <- rbind(c(24.5,10.2,36.7),
    -              c(14.2, 7.1,21.3),
    -              c(16.3, 8.2,24.4))
    -colnames(prey) <- colnames(pred) <- c("Estimate","95% LCI","95% UCI")
    -prey
    +
    +## Create hypothetical data for plotting three points ... similar to what might come from psdCalc()
    +prey <- rbind(c(45.4,30.2,56.8),
    +              c(68.2,56.7,79.4),
    +              c(17.1, 9.5,26.3))
    +pred <- rbind(c(24.5,10.2,36.7),
    +              c(14.2, 7.1,21.3),
    +              c(16.3, 8.2,24.4))
    +colnames(prey) <- colnames(pred) <- c("Estimate","95% LCI","95% UCI")
    +prey
     #>      Estimate 95% LCI 95% UCI
     #> [1,]     45.4    30.2    56.8
     #> [2,]     68.2    56.7    79.4
     #> [3,]     17.1     9.5    26.3
    -pred
    +pred
     #>      Estimate 95% LCI 95% UCI
     #> [1,]     24.5    10.2    36.7
     #> [2,]     14.2     7.1    21.3
     #> [3,]     16.3     8.2    24.4
    -
    -tictactoe()
    -if (require(plotrix)) {
    -  plotCI(prey[,1],pred[,1],li=prey[,2],ui=prey[,3],err="x",pch=16,add=TRUE)
    -  plotCI(prey[,1],pred[,1],li=pred[,2],ui=pred[,3],err="y",pch=16,add=TRUE)
    -}
    -lines(prey[,1],pred[,1])
    -text(prey[,1],pred[,1],labels=c(2010,2011,2012),adj=c(-0.5,-0.5))
    +
    +tictactoe()
    +if (require(plotrix)) {
    +  plotCI(prey[,1],pred[,1],li=prey[,2],ui=prey[,3],err="x",pch=16,add=TRUE)
    +  plotCI(prey[,1],pred[,1],li=pred[,2],ui=pred[,3],err="y",pch=16,add=TRUE)
    +}
    +lines(prey[,1],pred[,1])
    +text(prey[,1],pred[,1],labels=c(2010,2011,2012),adj=c(-0.5,-0.5))
     
    -
    +
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/validn.html b/docs/reference/validn.html index 28945311..7ca3274c 100644 --- a/docs/reference/validn.html +++ b/docs/reference/validn.html @@ -1,133 +1,133 @@ -Finds the number of valid (non-NA) values in a vector. — validn • FSAFinds the number of valid (non-NA) values in a vector. — validn • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Finds the number of valid (non-NA) values in a vector.

    -
    -
    validn(object)
    +
    +

    Usage

    +
    validn(object)
    -
    -

    Arguments

    +
    +

    Arguments

    object

    A vector.

    +
    -
    -

    Value

    -

    A single numeric value that is the number of non-NA values in a vector.

    +
    +

    Value

    + + +

    A single numeric value that is the number of non-NA values in a vector.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    2-Basic Data Manipulations.

    -
    -

    See also

    +
    +

    See also

    See valid.n in plotrix and nobs in gdata for similar functionality. See is.na for finding the missing values.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    junk1 <- c(1,7,2,4,3,10,NA)
    -junk2 <- c("Derek","Hugh","Ogle","Santa","Claus","Nick",NA,NA)
    -junk3 <- factor(junk2)
    -junk4 <- c(TRUE,TRUE,FALSE,FALSE,FALSE,TRUE,NA,NA)
    -junk5 <- data.frame(junk1)
    -junk6 <- data.frame(junk3)
    -
    -validn(junk1)
    +    
    +

    Examples

    +
    junk1 <- c(1,7,2,4,3,10,NA)
    +junk2 <- c("Derek","Hugh","Ogle","Santa","Claus","Nick",NA,NA)
    +junk3 <- factor(junk2)
    +junk4 <- c(TRUE,TRUE,FALSE,FALSE,FALSE,TRUE,NA,NA)
    +junk5 <- data.frame(junk1)
    +junk6 <- data.frame(junk3)
    +
    +validn(junk1)
     #> [1] 6
    -validn(junk2)
    +validn(junk2)
     #> [1] 6
    -validn(junk3)
    +validn(junk3)
     #> [1] 6
    -validn(junk4)
    +validn(junk4)
     #> [1] 6
    -validn(junk5)
    +validn(junk5)
     #> [1] 6
    -validn(junk6)
    +validn(junk6)
     #> [1] 6
    - 
    + 
     
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/vbStarts.html b/docs/reference/vbStarts.html index a1276506..a7e9eef6 100644 --- a/docs/reference/vbStarts.html +++ b/docs/reference/vbStarts.html @@ -1,134 +1,172 @@ -Find reasonable starting values for a von Bertalanffy growth function. — vbStarts • FSAFind reasonable starting values for a von Bertalanffy growth function. — vbStarts • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Finds reasonable starting values for the parameters in a specific parameterization of the von Bertalanffy growth function.

    -
    -
    vbStarts(
    -  formula,
    -  data = NULL,
    -  param = c("Typical", "typical", "Traditional", "traditional", "BevertonHolt",
    -    "Original", "original", "vonBertalanffy", "GQ", "GallucciQuinn", "Mooij", "Weisberg",
    -    "Ogle", "Schnute", "Francis", "Somers", "Somers2", "Pauly"),
    -  type = param,
    -  fixed = NULL,
    -  meth0 = c("yngAge", "poly"),
    -  methLinf = c("Walford", "oldAge", "longFish"),
    -  num4Linf = 1,
    -  ages2use = NULL,
    -  methEV = c("means", "poly"),
    -  valOgle = NULL,
    -  plot = FALSE,
    -  col.mdl = "gray70",
    -  lwd.mdl = 3,
    -  lty.mdl = 1,
    -  cex.main = 0.9,
    -  col.main = "red",
    -  dynamicPlot = FALSE,
    -  ...
    -)
    +
    +

    Usage

    +
    vbStarts(
    +  formula,
    +  data = NULL,
    +  param = c("Typical", "typical", "Traditional", "traditional", "BevertonHolt",
    +    "Original", "original", "vonBertalanffy", "GQ", "GallucciQuinn", "Mooij", "Weisberg",
    +    "Ogle", "Schnute", "Francis", "Somers", "Somers2", "Pauly"),
    +  type = param,
    +  fixed = NULL,
    +  meth0 = c("yngAge", "poly"),
    +  methLinf = c("Walford", "oldAge", "longFish"),
    +  num4Linf = 1,
    +  ages2use = NULL,
    +  methEV = c("means", "poly"),
    +  valOgle = NULL,
    +  plot = FALSE,
    +  col.mdl = "gray70",
    +  lwd.mdl = 3,
    +  lty.mdl = 1,
    +  cex.main = 0.9,
    +  col.main = "red",
    +  dynamicPlot = FALSE,
    +  ...
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    formula

    A formula of the form len~age.

    + +
    data

    A data frame that contains the variables in formula.

    + +
    type, param

    A string that indicates the parameterization of the von Bertalanffy model.

    + +
    fixed

    A named list that contains user-defined rather than automatically generated (i.e., fixed) starting values for one or more parameters. See details.

    + +
    meth0

    A string that indicates how the t0 and L0 parameters should be derived. See details.

    + +
    methLinf

    A string that indicates how Linf should be derived. See details.

    + +
    num4Linf

    A single numeric that indicates how many of the longest fish (if methLinf="longFish") or how any of the oldest ages (if methLinf="oldAge") should be averaged to estimate a starting value for Linf.

    + +
    ages2use

    A numerical vector of the two ages to be used in the Schnute or Francis parameterizations. See details.

    + +
    methEV

    A string that indicates how the lengths of the two ages in the Schnute parameterization or the three ages in the Francis parameterization should be derived. See details.

    + +
    valOgle

    A single named numeric that is the set Lr or tr value for use in type="Ogle". See details.

    + +
    plot

    A logical that indicates whether a plot of the data with the superimposed model fit at the starting values should be created.

    + +
    col.mdl

    A color for the model when plot=TRUE.

    + +
    lwd.mdl

    A line width for the model when plot=TRUE.

    + +
    lty.mdl

    A line type for the model when plot=TRUE.

    + +
    cex.main

    A character expansion value for the main title when plot=TRUE.

    + +
    col.main

    A color for the main title when plot=TRUE.

    + +
    dynamicPlot

    DEPRECATED.

    + +
    ...

    Further arguments passed to the methods.

    +
    -
    -

    Value

    -

    A list that contains reasonable starting values. Note that the parameters will be listed in the same order and with the same names as listed in vbFuns.

    +
    +

    Value

    + + +

    A list that contains reasonable starting values. Note that the parameters will be listed in the same order and with the same names as listed in vbFuns.

    -
    -

    Details

    +
    +

    Details

    This function attempts to find reasonable starting values for a variety of parameterizations of the von Bertalanffy growth function. There is no guarantee that these starting values are the ‘best’ starting values. One should use them with caution and should perform sensitivity analyses to determine the impact of different starting values on the final model results.

    If methLinf="Walford", then the Linf and K parameters are estimated via the concept of the Ford-Walford plot. If methLinf="oldAge" then Linf is estimated as the mean length of the num4Linf longest observed lengths.

    The product of the starting values for Linf and K is used as a starting value for omega in the GallucciQuinn and Mooij parameterizations. The result of log(2) divided by the starting value for K is used as the starting value for t50 in the Weisberg parameterization.

    @@ -136,32 +174,32 @@

    Details

    Starting values for the L1 and L3 parameters in the Schnute parameterization and the L1, L2, and L3 parameters in the Francis parameterization may be found in two ways. If methEV="poly", then the starting values are the predicted length-at-age from a second-degree polynomial fit to the mean lengths-at-age data. If methEV="means" then the observed sample means at the corresponding ages are used. In the case where one of the supplied ages is fractional, then the value returned will be linearly interpolated between the mean lengths of the two closest ages. The ages to be used for L1 and L3 in the Schnute and Francis parameterizations are supplied as a numeric vector of length 2 in ages2use=. If ages2use=NULL then the minimum and maximum observed ages will be used. In the Francis method, L2 will correspond to the age half-way between the two ages in ages2use=. A warning will be given if L2<L1 for the Schnute method or if L2<L1 or L3<L2 for the Francis method.

    Starting values for the Somers and Pauly parameterizations are the same as the traditional parameterization for Linf, K, and t0. However, for the Pauly parameterization the starting value for Kpr is the starting value for K divided by 1 minus the starting value of NGT. The starting values of C, ts, WP, and NGT are set at constants that are unlikely to work for all species. Thus, the user should use the fixed argument to fix starting values for these parameters that are more likely to result in a reliable fit.

    -
    -

    Note

    +
    +

    Note

    The ‘original’ and ‘vonBertalanffy’ and the ‘typical’ and ‘BevertonHolt’ parameterizations are synonymous.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    12-Individual Growth.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    See references in vbFuns.

    -
    -

    See also

    +
    +

    See also

    See growthFunShow to display the equations for the parameterizations used in FSA and vbFuns for functions that represent the von Bertalanffy parameterizations. See nlsTracePlot for help troubleshooting nonlinear models that don't converge.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Simple examples of each parameterization
    -vbStarts(tl~age,data=SpotVA1)
    +    
    +

    Examples

    +
    ## Simple examples of each parameterization
    +vbStarts(tl~age,data=SpotVA1)
     #> $Linf
     #> [1] 13.26773
     #> 
    @@ -171,7 +209,7 @@ 

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="Original") +vbStarts(tl~age,data=SpotVA1,type="Original") #> $Linf #> [1] 13.26773 #> @@ -181,7 +219,7 @@

    Examples

    #> $L0 #> [1] 7.732 #> -vbStarts(tl~age,data=SpotVA1,type="GQ") +vbStarts(tl~age,data=SpotVA1,type="GQ") #> $omega #> [1] 5.459258 #> @@ -191,7 +229,7 @@

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="Mooij") +vbStarts(tl~age,data=SpotVA1,type="Mooij") #> $Linf #> [1] 13.26773 #> @@ -201,7 +239,7 @@

    Examples

    #> $omega #> [1] 5.459258 #> -vbStarts(tl~age,data=SpotVA1,type="Weisberg") +vbStarts(tl~age,data=SpotVA1,type="Weisberg") #> $Linf #> [1] 13.26773 #> @@ -211,7 +249,7 @@

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5)) +vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5)) #> $L1 #> [1] 7.732 #> @@ -221,7 +259,7 @@

    Examples

    #> $L3 #> [1] 12.4 #> -vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,5)) +vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,5)) #> $L1 #> [1] 7.732 #> @@ -231,7 +269,7 @@

    Examples

    #> $K #> [1] 0.4114688 #> -vbStarts(tl~age,data=SpotVA1,type="Somers") +vbStarts(tl~age,data=SpotVA1,type="Somers") #> $Linf #> [1] 13.26773 #> @@ -247,7 +285,7 @@

    Examples

    #> $ts #> [1] 0.3 #> -vbStarts(tl~age,data=SpotVA1,type="Somers2") +vbStarts(tl~age,data=SpotVA1,type="Somers2") #> $Linf #> [1] 13.26773 #> @@ -263,7 +301,7 @@

    Examples

    #> $WP #> [1] 0.8 #> -vbStarts(tl~age,data=SpotVA1,type="Pauly") +vbStarts(tl~age,data=SpotVA1,type="Pauly") #> $Linf #> [1] 13.26773 #> @@ -279,7 +317,7 @@

    Examples

    #> $NGT #> [1] 0.3 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=0)) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=0)) #> $Linf #> [1] 13.26773 #> @@ -289,7 +327,7 @@

    Examples

    #> $Lr #> [1] 7.274961 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(Lr=8)) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(Lr=8)) #> $Linf #> [1] 13.26773 #> @@ -299,9 +337,9 @@

    Examples

    #> $tr #> [1] 0.7521688 #> - -## Using a different method to find Linf -vbStarts(tl~age,data=SpotVA1,methLinf="oldAge") + +## Using a different method to find Linf +vbStarts(tl~age,data=SpotVA1,methLinf="oldAge") #> $Linf #> [1] 12.4 #> @@ -311,7 +349,7 @@

    Examples

    #> $t0 #> [1] -2.374338 #> -vbStarts(tl~age,data=SpotVA1,methLinf="oldAge",num4Linf=2) +vbStarts(tl~age,data=SpotVA1,methLinf="oldAge",num4Linf=2) #> $Linf #> [1] 12.40625 #> @@ -321,7 +359,7 @@

    Examples

    #> $t0 #> [1] -2.37231 #> -vbStarts(tl~age,data=SpotVA1,methLinf="longFish") +vbStarts(tl~age,data=SpotVA1,methLinf="longFish") #> $Linf #> [1] 13.9 #> @@ -331,7 +369,7 @@

    Examples

    #> $t0 #> [1] -1.974668 #> -vbStarts(tl~age,data=SpotVA1,methLinf="longFish",num4Linf=10) +vbStarts(tl~age,data=SpotVA1,methLinf="longFish",num4Linf=10) #> $Linf #> [1] 13.64 #> @@ -341,7 +379,7 @@

    Examples

    #> $t0 #> [1] -2.033445 #> -vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="oldAge") +vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="oldAge") #> $Linf #> [1] 12.4 #> @@ -351,7 +389,7 @@

    Examples

    #> $L0 #> [1] 7.732 #> -vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="oldAge",num4Linf=2) +vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="oldAge",num4Linf=2) #> $Linf #> [1] 12.40625 #> @@ -361,7 +399,7 @@

    Examples

    #> $L0 #> [1] 7.732 #> -vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="longFish") +vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="longFish") #> $Linf #> [1] 13.9 #> @@ -371,7 +409,7 @@

    Examples

    #> $L0 #> [1] 7.732 #> -vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="longFish",num4Linf=10) +vbStarts(tl~age,data=SpotVA1,type="Original",methLinf="longFish",num4Linf=10) #> $Linf #> [1] 13.64 #> @@ -381,7 +419,7 @@

    Examples

    #> $L0 #> [1] 7.732 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=0),methLinf="oldAge",num4Linf=2) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=0),methLinf="oldAge",num4Linf=2) #> $Linf #> [1] 12.40625 #> @@ -391,7 +429,7 @@

    Examples

    #> $Lr #> [1] 7.274961 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(Lr=8),methLinf="longFish",num4Linf=10) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(Lr=8),methLinf="longFish",num4Linf=10) #> $Linf #> [1] 13.64 #> @@ -401,9 +439,9 @@

    Examples

    #> $tr #> [1] 0.7521688 #> - -## Using a different method to find t0 and L0 -vbStarts(tl~age,data=SpotVA1,meth0="yngAge") + +## Using a different method to find t0 and L0 +vbStarts(tl~age,data=SpotVA1,meth0="yngAge") #> $Linf #> [1] 13.26773 #> @@ -413,7 +451,7 @@

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="original",meth0="yngAge") +vbStarts(tl~age,data=SpotVA1,type="original",meth0="yngAge") #> $Linf #> [1] 13.26773 #> @@ -423,9 +461,9 @@

    Examples

    #> $L0 #> [1] 7.732 #> - -## Using a different method to find the L1, L2, and L3 -vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5),methEV="means") + +## Using a different method to find the L1, L2, and L3 +vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5),methEV="means") #> $L1 #> [1] 7.732 #> @@ -435,7 +473,7 @@

    Examples

    #> $L3 #> [1] 12.4 #> -vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,5),methEV="means") +vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,5),methEV="means") #> $L1 #> [1] 7.732 #> @@ -445,9 +483,9 @@

    Examples

    #> $K #> [1] 0.4114688 #> - -## Examples with a Plot -vbStarts(tl~age,data=SpotVA1,plot=TRUE) + +## Examples with a Plot +vbStarts(tl~age,data=SpotVA1,plot=TRUE) #> $Linf #> [1] 13.26773 @@ -458,7 +496,7 @@

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="original",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="original",plot=TRUE) #> $Linf #> [1] 13.26773 @@ -469,7 +507,7 @@

    Examples

    #> $L0 #> [1] 7.732 #> -vbStarts(tl~age,data=SpotVA1,type="GQ",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="GQ",plot=TRUE) #> $omega #> [1] 5.459258 @@ -480,7 +518,7 @@

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="Mooij",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Mooij",plot=TRUE) #> $Linf #> [1] 13.26773 @@ -491,7 +529,7 @@

    Examples

    #> $omega #> [1] 5.459258 #> -vbStarts(tl~age,data=SpotVA1,type="Weisberg",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Weisberg",plot=TRUE) #> $Linf #> [1] 13.26773 @@ -502,7 +540,7 @@

    Examples

    #> $t0 #> [1] -2.124367 #> -vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Francis",ages2use=c(0,5),plot=TRUE) #> $L1 #> [1] 7.732 @@ -513,7 +551,7 @@

    Examples

    #> $L3 #> [1] 12.4 #> -vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,5),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Schnute",ages2use=c(0,5),plot=TRUE) #> $L1 #> [1] 7.732 @@ -524,7 +562,7 @@

    Examples

    #> $K #> [1] 0.4114688 #> -vbStarts(tl~age,data=SpotVA1,type="Somers",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Somers",plot=TRUE) #> $Linf #> [1] 13.26773 @@ -541,7 +579,7 @@

    Examples

    #> $ts #> [1] 0.3 #> -vbStarts(tl~age,data=SpotVA1,type="Somers2",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Somers2",plot=TRUE) #> $Linf #> [1] 13.26773 @@ -558,7 +596,7 @@

    Examples

    #> $WP #> [1] 0.8 #> -vbStarts(tl~age,data=SpotVA1,type="Pauly",plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Pauly",plot=TRUE) #> $Linf #> [1] 13.26773 @@ -575,7 +613,7 @@

    Examples

    #> $NGT #> [1] 0.3 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=0),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=0),plot=TRUE) #> $Linf #> [1] 13.26773 @@ -586,7 +624,7 @@

    Examples

    #> $Lr #> [1] 7.274961 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(Lr=8),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(Lr=8),plot=TRUE) #> $Linf #> [1] 13.26773 @@ -597,9 +635,9 @@

    Examples

    #> $tr #> [1] 0.7521688 #> - -## Examples where some parameters are fixed by the user -vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15)) + +## Examples where some parameters are fixed by the user +vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15)) #> $Linf #> [1] 15 #> @@ -609,7 +647,7 @@

    Examples

    #> $t0 #> [1] -1.760933 #> -vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15,K=0.3)) +vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15,K=0.3)) #> $Linf #> [1] 15 #> @@ -619,7 +657,7 @@

    Examples

    #> $t0 #> [1] -2.41523 #> -vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15,K=0.3,t0=-1)) +vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15,K=0.3,t0=-1)) #> $Linf #> [1] 15 #> @@ -629,7 +667,7 @@

    Examples

    #> $t0 #> [1] -1 #> -vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15,K=0.3,t0=-1),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,fixed=list(Linf=15,K=0.3,t0=-1),plot=TRUE) #> $Linf #> [1] 15 @@ -640,7 +678,7 @@

    Examples

    #> $t0 #> [1] -1 #> -vbStarts(tl~age,data=SpotVA1,type="Pauly",fixed=list(t0=-1.5),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Pauly",fixed=list(t0=-1.5),plot=TRUE) #> $Linf #> [1] 13.26773 @@ -657,7 +695,7 @@

    Examples

    #> $NGT #> [1] 0.3 #> -vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=2),fixed=list(Lr=10),plot=TRUE) +vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=2),fixed=list(Lr=10),plot=TRUE) #> $Linf #> [1] 13.26773 @@ -668,31 +706,27 @@

    Examples

    #> $Lr #> [1] 10 #> - -## See examples in vbFuns() for use of vbStarts() when fitting Von B models - + +## See examples in vbFuns() for use of vbStarts() when fitting Von B models +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/wrAdd.html b/docs/reference/wrAdd.html index 90be86af..8ee03088 100644 --- a/docs/reference/wrAdd.html +++ b/docs/reference/wrAdd.html @@ -1,144 +1,158 @@ -Computes a vector of relative weights specific to a species in an entire data frame. — wrAdd • FSAComputes a vector of relative weights specific to a species in an entire data frame. — wrAdd • FSA + + Skip to contents -
    -
    -
    - +
    +
    +
    -
    +

    This computes a vector that contains the relative weight specific to each species for all individuals in an entire data frame.

    -
    -
    wrAdd(wt, ...)
    -
    -# S3 method for default
    -wrAdd(wt, len, spec, units = c("metric", "English"), ...)
    -
    -# S3 method for formula
    -wrAdd(wt, data, units = c("metric", "English"), ...)
    +
    +

    Usage

    +
    wrAdd(wt, ...)
    +
    +# S3 method for default
    +wrAdd(wt, len, spec, units = c("metric", "English"), ...)
    +
    +# S3 method for formula
    +wrAdd(wt, data, units = c("metric", "English"), ...)
    -
    -

    Arguments

    +
    +

    Arguments

    wt

    A numeric vector that contains weight measurements or a formula of the form wt~len+spec where “wt” generically represents the weight variable, “len” generically represents the length variable, and “spec” generically represents the species variable. Note that this formula can only contain three variables and they must be in the order of weight first, length second, species third.

    + +
    ...

    Not used.

    + +
    len

    A numeric vector that contains length measurements. Not used if wt is a formula.

    + +
    spec

    A character or factor vector that contains the species names. Not used if wt is a formula.

    + +
    units

    A string that indicates whether the weight and length data in formula are in ("metric" (DEFAULT; mm and g) or "English" (in and lbs) units.

    + +
    data

    A data.frame that minimally contains variables of the the observed lengths, observed weights, and the species names given in the formula=.

    +
    -
    -

    Value

    -

    Returns A numeric vector that contains the computed relative weights, in the same order as in data=.

    +
    +

    Value

    + + +

    Returns A numeric vector that contains the computed relative weights, in the same order as in data=.

    -
    -

    Details

    +
    +

    Details

    This computes a vector that contains the relative weight specific to each species for all individuals in an entire data frame. The vector can be appended to an existing data.frame to create a variable that contains the relative weights for each individual. The relative weight value will be NA for each individual for which a standard weight equation does not exist in WSlit, a standard weight equation for the units given in units= does not exist in WSlit, a standard weight equation for the 75th percentile does not exist in WSlit, or if the individual is shorter or longer than the lengths for which the standard weight equation should be applied. Either the linear or quadratic equation has been listed as preferred for each species, so only that equation will be used. The use of the 75th percentile is by far the most common and, because this function is designed for use on entire data frames, it will be the only percentile allowed. Thus, to use equations for other percentiles, one will have to use “manual” methods. See WSlit and wsVal for more details about types of equations, percentiles, finding which species have published standard weight equations, etc. See the examples for one method for changing species names to something that this function will recognize.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    8-Condition.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See wsVal, WSlit, and psdAdd for related functionality. See mapvalues for help in changing species names to match those in WSlit.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    ## Create random data for three species
    -# just to control the randomization
    -set.seed(345234534)
    -dbt <- data.frame(species=factor(rep(c("Bluefin Tuna"),30)),
    -                  tl=round(rnorm(30,1900,300),0))
    -dbt$wt <- round(4.5e-05*dbt$tl^2.8+rnorm(30,0,6000),1)
    -dbg <- data.frame(species=factor(rep(c("Bluegill"),30)),
    -                  tl=round(rnorm(30,130,50),0))
    -dbg$wt <- round(4.23e-06*dbg$tl^3.316+rnorm(30,0,10),1)
    -dlb <- data.frame(species=factor(rep(c("Largemouth Bass"),30)),
    -                  tl=round(rnorm(30,350,60),0))
    -dlb$wt <- round(2.96e-06*dlb$tl^3.273+rnorm(30,0,60),1)
    -df <- rbind(dbt,dbg,dlb)
    -str(df)
    +    
    +

    Examples

    +
    ## Create random data for three species
    +# just to control the randomization
    +set.seed(345234534)
    +dbt <- data.frame(species=factor(rep(c("Bluefin Tuna"),30)),
    +                  tl=round(rnorm(30,1900,300),0))
    +dbt$wt <- round(4.5e-05*dbt$tl^2.8+rnorm(30,0,6000),1)
    +dbg <- data.frame(species=factor(rep(c("Bluegill"),30)),
    +                  tl=round(rnorm(30,130,50),0))
    +dbg$wt <- round(4.23e-06*dbg$tl^3.316+rnorm(30,0,10),1)
    +dlb <- data.frame(species=factor(rep(c("Largemouth Bass"),30)),
    +                  tl=round(rnorm(30,350,60),0))
    +dlb$wt <- round(2.96e-06*dlb$tl^3.273+rnorm(30,0,60),1)
    +df <- rbind(dbt,dbg,dlb)
    +str(df)
     #> 'data.frame':	90 obs. of  3 variables:
     #>  $ species: Factor w/ 3 levels "Bluefin Tuna",..: 1 1 1 1 1 1 1 1 1 1 ...
     #>  $ tl     : num  1371 1558 2031 2226 2124 ...
     #>  $ wt     : num  19231 38147 90530 104718 88761 ...
    -
    -df$Wr1 <- wrAdd(wt~tl+species,data=df)
    -## same but with non-formula interface
    -df$Wr2 <- wrAdd(df$wt,df$tl,df$species)
    -
    -## Same as above but using dplyr
    -if (require(dplyr)) {
    -  df <- mutate(df,Wr3a=wrAdd(wt,tl,species))
    -}
    -
    -df
    +
    +df$Wr1 <- wrAdd(wt~tl+species,data=df)
    +## same but with non-formula interface
    +df$Wr2 <- wrAdd(df$wt,df$tl,df$species)
    +
    +## Same as above but using dplyr
    +if (require(dplyr)) {
    +  df <- mutate(df,Wr3a=wrAdd(wt,tl,species))
    +}
    +
    +df
     #>            species   tl       wt       Wr1       Wr2      Wr3a
     #> 1     Bluefin Tuna 1371  19231.0        NA        NA        NA
     #> 2     Bluefin Tuna 1558  38146.9        NA        NA        NA
    @@ -230,33 +244,29 @@ 

    Examples

    #> 88 Largemouth Bass 441 1288.2 96.10274 96.10274 96.10274 #> 89 Largemouth Bass 266 265.7 103.69408 103.69408 103.69408 #> 90 Largemouth Bass 371 612.6 80.46670 80.46670 80.46670 - -## Example with only one species in the data.frame -bg <- droplevels(subset(df,species=="Bluegill")) -bg$Wr4 <- wrAdd(wt~tl+species,data=bg) - + +## Example with only one species in the data.frame +bg <- droplevels(subset(df,species=="Bluegill")) +bg$Wr4 <- wrAdd(wt~tl+species,data=bg) +
    -
    - -
    +
    -
    +
    - diff --git a/docs/reference/wsVal.html b/docs/reference/wsVal.html index c729df95..df0178a5 100644 --- a/docs/reference/wsVal.html +++ b/docs/reference/wsVal.html @@ -1,115 +1,127 @@ -Finds standard weight equation coefficients for a particular species. — wsVal • FSAFinds standard weight equation coefficients for a particular species. — wsVal • FSA + + Skip to contents -
    -
    -
    - + + +
    +
    +
    +
    -
    +

    Returns a vector that contains all known or a subset of information about the standard weight equation for a given species, type of measurement units, and reference percentile.

    -
    -
    wsVal(
    -  species = "List",
    -  units = c("metric", "English"),
    -  ref = 75,
    -  simplify = FALSE
    -)
    +
    +

    Usage

    +
    wsVal(
    +  species = "List",
    +  units = c("metric", "English"),
    +  ref = 75,
    +  simplify = FALSE
    +)
    -
    -

    Arguments

    +
    +

    Arguments

    species

    A string that contains the species name for which to find coefficients. See details.

    + +
    units

    A string that indicates whether the coefficients for the standard weight equation to be returned are in ("metric" (DEFAULT; mm and g) or "English" (in and lbs) units.

    + +
    ref

    A numeric that indicates which percentile the equation should be returned for. Note that the vast majority of equations only exist for the 75th percentile (DEFAULT).

    + +
    simplify

    A logical that indicates whether the ‘units’, ‘ref’, ‘measure’, ‘method’, ‘comments’, and ‘source’ fields should be included (=FALSE) or not (=TRUE; DEFAULT). See details.

    +
    -
    -

    Value

    -

    A one row data frame from WSlit that contains all known information about the standard weight equation for a given species, type of measurement units, and reference percentile if simplify=FALSE. If simplify=TRUE then only the species; minimum and maximum length for which the standard equation should be applied; and intercept, slope, and quadratic coefficients for the standard weight equation. Note that the maximum length and the quadratic coefficient will not be returned if they do not exist in WSlit. -If no arguments are given to this function, a species name is mis-spelled, or if a standard weight equation does not exist (in WSlit) for a particular species, then a warning will be issued and a list of species names will be printed.

    +
    +

    Value

    + + +

    A one row data frame from WSlit that contains all known information about the standard weight equation for a given species, type of measurement units, and reference percentile if simplify=FALSE. If simplify=TRUE then only the species; minimum and maximum length for which the standard equation should be applied; and intercept, slope, and quadratic coefficients for the standard weight equation. Note that the maximum length and the quadratic coefficient will not be returned if they do not exist in WSlit.

    + + +

    If no arguments are given to this function, a species name is mis-spelled, or if a standard weight equation does not exist (in WSlit) for a particular species, then a warning will be issued and a list of species names will be printed.

    -
    -

    Details

    +
    +

    Details

    This function extracts all known information from WSlit about the following standard weight equation,

    $$log_{10}(Ws) = log_{10}(a) + b\,log_{10}(L) + b_{2}\,\left(log_{10}(L)\right)^{2}$$

    See WSlit for more information about the meaning of each value returned.

    Note from above that the coefficients are returned for the TRANSFORMED model. Thus, to obtain the standard weight (Ws), the returned coefficients are used to compute the common log of Ws, which must then be raised to the power of 10 to compute the Ws.

    -
    -

    IFAR Chapter

    +
    +

    IFAR Chapter

    8-Condition.

    -
    -

    References

    -

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    +
    +

    References

    +

    Ogle, D.H. 2016. Introductory Fisheries Analyses with R. Chapman & Hall/CRC, Boca Raton, FL.

    -
    -

    See also

    +
    +

    See also

    See wrAdd and WSlit for related functionality.

    -
    -

    Author

    -

    Derek H. Ogle, derek@derekogle.com

    +
    +

    Author

    +

    Derek H. Ogle, DerekOgle51@gmail.com

    -
    -

    Examples

    -
    wsVal()
    +    
    +

    Examples

    +
    wsVal()
     #> 
     #> Species name must be one of following. Be careful of spelling and capitalization.
     #>   [1] "Aegean Chub"                       "African Sharptooth Catfish"       
    @@ -166,49 +178,45 @@ 

    Examples

    #> [103] "White Sturgeon" "White Sucker" #> [105] "Yellow Bass" "Yellow Bullhead" #> [107] "Yellow Perch" -wsVal("Bluegill") +wsVal("Bluegill") #> species units type ref measure method min.TL int slope source #> 26 Bluegill metric linear 75 TL Other 80 -5.374 3.316 Hillman (1982) -wsVal("Bluegill",units="metric") +wsVal("Bluegill",units="metric") #> species units type ref measure method min.TL int slope source #> 26 Bluegill metric linear 75 TL Other 80 -5.374 3.316 Hillman (1982) -wsVal("Bluegill",units="English") +wsVal("Bluegill",units="English") #> species units type ref measure method min.TL int slope #> 25 Bluegill English linear 75 TL Other 3 -3.371 3.316 #> source #> 25 Hillman (1982) -wsVal("Bluegill",units="English",simplify=TRUE) +wsVal("Bluegill",units="English",simplify=TRUE) #> species min.TL int slope #> 25 Bluegill 3 -3.371 3.316 -wsVal("Ruffe",units="metric",simplify=TRUE) +wsVal("Ruffe",units="metric",simplify=TRUE) #> species min.TL max.TL int slope quad #> 142 Ruffe 55 205 -2.58 0.621 0.6073 -wsVal("Ruffe",units="metric",ref=50,simplify=TRUE) +wsVal("Ruffe",units="metric",ref=50,simplify=TRUE) #> species min.TL max.TL int slope quad #> 141 Ruffe 55 205 -3.3524 1.3969 0.4054 - +
    -
    - -
    +
    -
    +
    - diff --git a/man/BluegillJL.Rd b/man/BluegillJL.Rd index 5d036c5d..f013c8c8 100644 --- a/man/BluegillJL.Rd +++ b/man/BluegillJL.Rd @@ -12,7 +12,7 @@ A data frame with 277 observations on the following 2 variables. } } \source{ -From example 8.1 in Schneider, J.C. 1998. Lake fish population estimates by mark-and-recapture methods. Chapter 8 in Schneider, J.C. (ed.) 2000. Manual of fisheries survey methods II: with periodic updates. Michigan Department of Natural Resources, Fisheries Special Report 25, Ann Arbor. [Was (is?) from http://www.michigandnr.com/publications/pdfs/IFR/manual/SMII\%20Chapter08.pdf.] +From example 8.1 in Schneider, J.C. 1998. Lake fish population estimates by mark-and-recapture methods. Chapter 8 in Schneider, J.C. (ed.) 2000. Manual of fisheries survey methods II: with periodic updates. Michigan Department of Natural Resources, Fisheries Special Report 25, Ann Arbor. [Was (is?) from http://www.michigandnr.com/publications/pdfs/IFR/manual/SMII\%20Chapter08.pdf.] \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/BluegillJL.csv}{CSV file} } \description{ Each line consists of the capture history over two samples of Bluegill (\emph{Lepomis macrochirus}) in Jewett Lake (MI). This file contains the capture histories for only Bluegill larger than 6-in. diff --git a/man/BrookTroutTH.Rd b/man/BrookTroutTH.Rd index f1ca3db9..b107a084 100644 --- a/man/BrookTroutTH.Rd +++ b/man/BrookTroutTH.Rd @@ -12,7 +12,7 @@ A data frame with 7 observations on the following 2 variables. } } \source{ -Quinlan, H.R. 1999. Biological Characteristics of Coaster Brook Trout at Isle Royale National Park, Michigan, 1996-98. U.S. Fish and Wildlife Service Ashland Fishery Resources Office report. November 1999. [Was (is?) from http://www.fws.gov/midwest/ashland/brook/biochar/biolchar.html.] +Quinlan, H.R. 1999. Biological Characteristics of Coaster Brook Trout at Isle Royale National Park, Michigan, 1996-98. U.S. 
Fish and Wildlife Service Ashland Fishery Resources Office report. November 1999. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/BrookTroutTH.csv}{CSV file} } \description{ Catch-at-age in fyke nets from 1996-1998 for \dQuote{Coaster} Brook Trout (\emph{Salvelinus fontinalis}) in Tobin Harbor, Isle Royale, Lake Superior. diff --git a/man/ChinookArg.Rd b/man/ChinookArg.Rd index f946b4e5..b04fa6c3 100644 --- a/man/ChinookArg.Rd +++ b/man/ChinookArg.Rd @@ -13,7 +13,7 @@ A data frame with 112 observations on the following 3 variables: } } \source{ -From Figure 4 in Soto, D., I. Arismendi, C. Di Prinzio, and F. Jara. 2007. Establishment of Chinook Salmon (\emph{Oncorhynchus tshawytscha}) in Pacific basins of southern South America and its potential ecosystem implications. Revista Chilena d Historia Natural, 80:81-98. [Was (is?) from http://www.scielo.cl/pdf/rchnat/v80n1/art07.pdf.] +From Figure 4 in Soto, D., I. Arismendi, C. Di Prinzio, and F. Jara. 2007. Establishment of Chinook Salmon (\emph{Oncorhynchus tshawytscha}) in Pacific basins of southern South America and its potential ecosystem implications. Revista Chilena d Historia Natural, 80:81-98. [Was (is?) from http://www.scielo.cl/pdf/rchnat/v80n1/art07.pdf.] \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/ChinookArg.csv}{CSV file} } \description{ Lengths and weights for Chinook Salmon from three locations in Argentina. diff --git a/man/CodNorwegian.Rd b/man/CodNorwegian.Rd index 30226ec0..ad4c5208 100644 --- a/man/CodNorwegian.Rd +++ b/man/CodNorwegian.Rd @@ -13,7 +13,7 @@ A data frame of 24 observations on the following 3 variables: } } \source{ -From Garrod, D.J. 1967. Population dynamics of the Arcto-Norwegian Cod. Journal of the Fisheries Research Board of Canada, 24:145-190. +From Garrod, D.J. 1967. Population dynamics of the Arcto-Norwegian Cod. Journal of the Fisheries Research Board of Canada, 24:145-190. 
\href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/CodNorwegian.csv}{CSV file} } \description{ Norwegian cod (\emph{Gadus morhua}) stock and recruitment by year, 1937-1960. diff --git a/man/CutthroatAL.Rd b/man/CutthroatAL.Rd index afcfd5ad..016da68c 100644 --- a/man/CutthroatAL.Rd +++ b/man/CutthroatAL.Rd @@ -20,7 +20,7 @@ A data frame with 1684 observations on the following 10 variables. } } \source{ -From Appendix A.3 of Harding, R.D., C.L. Hoover, and R.P. Marshall. 2010. Abundance of Cutthroat Trout in Auke Lake, Southeast Alaska, in 2005 and 2006. Alaska Department of Fish and Game Fisheries Data Series No. 10-82. [Was (is?) from http://www.sf.adfg.state.ak.us/FedAidPDFs/FDS10-82.pdf.] +From Appendix A.3 of Harding, R.D., C.L. Hoover, and R.P. Marshall. 2010. Abundance of Cutthroat Trout in Auke Lake, Southeast Alaska, in 2005 and 2006. Alaska Department of Fish and Game Fisheries Data Series No. 10-82. [Was (is?) from http://www.sf.adfg.state.ak.us/FedAidPDFs/FDS10-82.pdf.] \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/CutthroatAL.csv}{CSV file} } \description{ Individual capture histories of Cutthroat Trout (\emph{Oncorhynchus clarki}) in Auke Lake, Alaska, from samples taken in 1998-2006. diff --git a/man/Ecoli.Rd b/man/Ecoli.Rd index 536074c8..d407e22e 100644 --- a/man/Ecoli.Rd +++ b/man/Ecoli.Rd @@ -12,7 +12,7 @@ A data frame with 8 observations on the following 2 variables: } } \source{ -McKendrick, A.G. and M. Kesava Pai. 1911. The Rate of Multiplication of Micro-Organisms: a Mathematical Study. Proceedings of the Royal Society of Edinburgh. 31:649-655. +McKendrick, A.G. and M. Kesava Pai. 1911. The Rate of Multiplication of Micro-Organisms: a Mathematical Study. Proceedings of the Royal Society of Edinburgh. 31:649-655. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/Ecoli.csv}{CSV file} } \description{ The number of \emph{Escherichia coli} cells versus time. 
diff --git a/man/FSA.Rd b/man/FSA.Rd index 8f266482..bd1b7e51 100644 --- a/man/FSA.Rd +++ b/man/FSA.Rd @@ -8,7 +8,7 @@ Functions to support basic fisheries stock assessment methods. } \details{ -Functions from this package can be used to perform a variety of basic fisheries stock assessment methods. Detailed descriptions for most functions are available in the \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analysis with R} book (Ogle 2016). Vignettes for the boxed examples in the \dQuote{Analysis and Interpretation of Freshwater Fisheries Data} book can be viewed with \code{fishR("AIFFD")}. +Functions from this package can be used to perform a variety of basic fisheries stock assessment methods. Detailed descriptions for most functions are available in the Introductory Fisheries Analysis with R book (Ogle 2016) (see \code{fishR("IFAR")}). Vignettes for the boxed examples in the \dQuote{Analysis and Interpretation of Freshwater Fisheries Data} book can be viewed with \code{fishR("AIFFD")}. Questions, comments, or suggestions should be given on the \href{https://github.com/fishR-Core-Team/FSA/issues/}{GitHub FSA Issues page}. @@ -20,5 +20,5 @@ Packages with related functionality by the same author are } } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } diff --git a/man/Mirex.Rd b/man/Mirex.Rd index 95b3973e..80971828 100644 --- a/man/Mirex.Rd +++ b/man/Mirex.Rd @@ -14,7 +14,7 @@ A data frame with 122 observations on the following 4 variables. } } \source{ -From (actual data) Makarewicz, J.C., E.Damaske, T.W. Lewis, and M. Merner. 2003. Trend analysis reveals a recent reduction in mirex concentrations in Coho (\emph{Oncorhynchus kisutch}) and Chinook (\emph{O. 
tshawytscha}) Salmon from Lake Ontario. Environmental Science and Technology, 37:1521-1527. +From (actual data) Makarewicz, J.C., E.Damaske, T.W. Lewis, and M. Merner. 2003. Trend analysis reveals a recent reduction in mirex concentrations in Coho (\emph{Oncorhynchus kisutch}) and Chinook (\emph{O. tshawytscha}) Salmon from Lake Ontario. Environmental Science and Technology, 37:1521-1527. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/Mirex.csv}{CSV file} } \description{ Mirex concentration, weight, capture year, and species of Lake Ontario Coho and Chinook salmon. diff --git a/man/PSDlit.Rd b/man/PSDlit.Rd index 6e097585..e0898c93 100644 --- a/man/PSDlit.Rd +++ b/man/PSDlit.Rd @@ -47,7 +47,7 @@ head(PSDlit) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{psdVal}}, \code{\link{psdCalc}}, \code{\link{psdPlot}}, \code{\link{psdAdd}}, and \code{\link{tictactoe}} for related functionality. diff --git a/man/PikeNY.Rd b/man/PikeNY.Rd index e31ad009..4538f321 100644 --- a/man/PikeNY.Rd +++ b/man/PikeNY.Rd @@ -14,7 +14,7 @@ A data frame with 21 observations on the following 4 variables: } } \source{ -New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). +New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). 
\href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/PikeNY.csv}{CSV file} } \description{ Summary results of capture histories (number captured, number of recaptured fish, and number of unmarked fish that were marked) for all Buckhorn Marsh Northern Pike (\emph{Esox lucius}). diff --git a/man/PikeNYPartial1.Rd b/man/PikeNYPartial1.Rd index 175d5132..2068260a 100644 --- a/man/PikeNYPartial1.Rd +++ b/man/PikeNYPartial1.Rd @@ -15,7 +15,7 @@ A data frame with 57 observations on the following 4 variables. } } \source{ -Summary values taken from Table C-1 of New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). +Summary values taken from Table C-1 of New York Power Authority. 2004. Use of Buckhorn Marsh and Grand Island tributaries by Northern Pike for spawning and as a nursery. Technical report, New York Power Authority, January 2004. Niagara Power Project (FERC No. 2216). \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/PikeNYPartial1.csv}{CSV file} } \description{ Each line consists of the capture history over four samples of Northern Pike (\emph{Esox lucius}) in Buckhorn Marsh. This file contains the capture histories for only those pike captured from April 1-4. diff --git a/man/SMBassLS.Rd b/man/SMBassLS.Rd index 9c4deeb1..1f7ee702 100644 --- a/man/SMBassLS.Rd +++ b/man/SMBassLS.Rd @@ -13,7 +13,7 @@ A data frame with 10 observations on the following 3 variables: } } \source{ -From Omand, D.N. 1951. A study of populations of fish based on catch-effort statistics. Journal of Wildlife Management, 15:88-98. +From Omand, D.N. 1951. A study of populations of fish based on catch-effort statistics. Journal of Wildlife Management, 15:88-98. 
\href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/SMBassLS.csv}{CSV file} } \description{ Catch-effort data for Smallmouth Bass (\emph{Micropterus dolomieu}) in Little Silver Lake, Ont. diff --git a/man/SMBassWB.Rd b/man/SMBassWB.Rd index be925608..6ebe9ed8 100644 --- a/man/SMBassWB.Rd +++ b/man/SMBassWB.Rd @@ -30,7 +30,7 @@ A data frame of 445 observations on the following 20 variables: } } \source{ -Data from the linear growth modeling software distributed in support of Weisberg, S. 1993. Using hard-part increment data to estimate age and environmental effects. Canadian Journal of Fisheries and Aquatic Sciences 50:1229-1237. +Data from the linear growth modeling software distributed in support of Weisberg, S. 1993. Using hard-part increment data to estimate age and environmental effects. Canadian Journal of Fisheries and Aquatic Sciences 50:1229-1237. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/SMBassWB.csv}{CSV file} } \description{ Growth data from Smallmouth Bass (\emph{Micropterus dolomieu}) captured in West Bearskin Lake, MN. Five samples were collected over three years (1988-1990) with two gears (fall -- trapnets, spring -- electrofishing). diff --git a/man/Schnute.Rd b/man/Schnute.Rd index 23c81807..9bba8f33 100644 --- a/man/Schnute.Rd +++ b/man/Schnute.Rd @@ -69,6 +69,6 @@ Schnute, J. 1981. A versatile growth model with statistical stable parameters. C See \code{\link{vbFuns}}, \code{\link{GompertzFuns}}, \code{\link{RichardsFuns}}, \code{\link{logisticFuns}}, and \code{\link{SchnuteRichards}} for similar functionality for other models. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/SchnuteRichards.Rd b/man/SchnuteRichards.Rd index fa6d7695..1b1fa52f 100644 --- a/man/SchnuteRichards.Rd +++ b/man/SchnuteRichards.Rd @@ -46,6 +46,6 @@ Schnute, J.T. and L.J. Richards. 1990. 
A unified approach to the analysis of fis See \code{\link{vbFuns}}, \code{\link{GompertzFuns}}, \code{\link{RichardsFuns}}, \code{\link{logisticFuns}}, and \code{\link{Schnute}} for similar functionality for other models. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/SpotVA1.Rd b/man/SpotVA1.Rd index 7bfc8f32..57f327d2 100644 --- a/man/SpotVA1.Rd +++ b/man/SpotVA1.Rd @@ -12,7 +12,7 @@ A data frame of 403 observations on the following 2 variables: } } \source{ -Extracted from Table 1 in Chapter 8 (Spot) of the VMRC Final Report on Finfish Ageing, 2002 by the Center for Quantitative Fisheries Ecology at Old Dominion University. +Extracted from Table 1 in Chapter 8 (Spot) of the VMRC Final Report on Finfish Ageing, 2002 by the Center for Quantitative Fisheries Ecology at Old Dominion University. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/SpotVA1.csv}{CSV file} } \description{ Ages (from otoliths) and lengths of Virginia Spot (\emph{Leiostomus xanthurus}). diff --git a/man/Summarize.Rd b/man/Summarize.Rd index ff66cdba..470c5723 100644 --- a/man/Summarize.Rd +++ b/man/Summarize.Rd @@ -113,6 +113,6 @@ lapply(as.list(d[,1:3]),Summarize,digits=4) See \code{\link[base]{summary}} for related one dimensional functionality. See \code{\link[base]{tapply}}, \code{summaryBy} in \pkg{doBy}, \code{\link[psych]{describe}} in \pkg{psych}, \code{describe} in \pkg{prettyR}, and \code{basicStats} in \pkg{fBasics} for similar \dQuote{by} functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{misc} diff --git a/man/WR79.Rd b/man/WR79.Rd index 6227c8a4..1a725f26 100644 --- a/man/WR79.Rd +++ b/man/WR79.Rd @@ -13,7 +13,7 @@ A data frame of 2369 observations on the following 3 variables: } } \source{ -Simulated from Table 2A in Westerheim, S.J. and W.E. Ricker. 1979. 
Bias in using age-length key to estimate age-frequency distributions. Journal of the Fisheries Research Board of Canada. 35:184-189. +Simulated from Table 2A in Westerheim, S.J. and W.E. Ricker. 1979. Bias in using age-length key to estimate age-frequency distributions. Journal of the Fisheries Research Board of Canada. 35:184-189. \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/WR79.csv}{CSV file} } \description{ Ages and lengths for a hypothetical sample in Westerheim and Ricker (1979). diff --git a/man/WSlit.Rd b/man/WSlit.Rd index d3499636..b74cb6aa 100644 --- a/man/WSlit.Rd +++ b/man/WSlit.Rd @@ -52,7 +52,7 @@ head(WSlit) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{wsVal}} and \code{\link{wrAdd}} for related functionality. diff --git a/man/WhitefishLC.Rd b/man/WhitefishLC.Rd index 0d9136e6..1e87fb5b 100644 --- a/man/WhitefishLC.Rd +++ b/man/WhitefishLC.Rd @@ -21,7 +21,7 @@ A data frame with 151 observations on the following 11 variables: } } \source{ -Data from Herbst, S.J. and J.E. Marsden. 2011. Comparison of precision and bias of scale, fin ray, and otolith age estimates for lake whitefish (\emph{Coregonus clupeaformis}) in Lake Champlain. Journal of Great Lakes Research. 37:386-389. Contributed by Seth Herbst. \bold{Do not use for other than educational purposes without permission from the author.} [Was (is?) from http://www.uvm.edu/rsenr/emarsden/documents/Herbst\%20and\%20Marsden\%20whitefish\%20age\%20structure\%20comparison.pdf.] +Data from Herbst, S.J. and J.E. Marsden. 2011. 
Comparison of precision and bias of scale, fin ray, and otolith age estimates for lake whitefish (\emph{Coregonus clupeaformis}) in Lake Champlain. Journal of Great Lakes Research. 37:386-389. Contributed by Seth Herbst. \bold{Do not use for other than educational purposes without permission from the author.} \href{https://raw.githubusercontent.com/fishR-Core-Team/FSA/master/data-raw/WhitefishLC.csv}{CSV file} } \description{ Assigned ages from two readers on three structures for Lake Whitefish (\emph{Coregonus clupeaformis}) from Lake Champlain in 2009. diff --git a/man/addZeroCatch.Rd b/man/addZeroCatch.Rd index 9052865c..9c7e72ea 100644 --- a/man/addZeroCatch.Rd +++ b/man/addZeroCatch.Rd @@ -124,12 +124,12 @@ df6mod1 } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ \code{complete} in \pkg{tidyr} package. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/ageBias.Rd b/man/ageBias.Rd index 85844660..838c4684 100644 --- a/man/ageBias.Rd +++ b/man/ageBias.Rd @@ -18,8 +18,8 @@ ageBias( \method{summary}{ageBias}( object, - what = c("table", "symmetry", "Bowker", "EvansHoenig", "McNemar", "bias", - "diff.bias", "n"), + what = c("table", "symmetry", "Bowker", "EvansHoenig", "McNemar", "bias", "diff.bias", + "n"), flip.table = FALSE, zero.print = "-", digits = 3, @@ -325,7 +325,7 @@ par(op) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. 
\href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Campana, S.E., M.C. Annand, and J.I. McMillan. 1995. Graphical and statistical methods for determining the consistency of age determinations. Transactions of the American Fisheries Society 124:131-138. [Was (is?) available from http://www.bio.gc.ca/otoliths/documents/Campana\%20et\%20al\%201995\%20TAFS.pdf.] @@ -341,7 +341,7 @@ Muir, A.M., M.P. Ebener, J.X. He, and J.E. Johnson. 2008. A comparison of the sc See \code{\link{agePrecision}} for measures of precision between pairs of age estimates. See \code{\link[fishmethods]{compare2}} in \pkg{fishmethods} for similar functionality. See \code{\link{plotAB}} for a more traditional age-bias plot. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} \keyword{manip} diff --git a/man/agePrecision.Rd b/man/agePrecision.Rd index b79da375..0389d49a 100644 --- a/man/agePrecision.Rd +++ b/man/agePrecision.Rd @@ -68,10 +68,10 @@ If \code{what="precision"} in \code{summary} then a summary table that contains \item R Number of age estimates given in \code{formula}. \item PercAgree The percentage of fish for which all age estimates perfectly agree. \item ASD The average (across all fish) standard deviation of ages within a fish. - \item ACV The average (across all fish) coefficient of variation of ages within a fish using the \bold{mean} as the divisor. See the \href{http://derekogle.com/IFAR/}{IFAR chapter} for calculation details. + \item ACV The average (across all fish) coefficient of variation of ages within a fish using the \bold{mean} as the divisor. See the \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{IFAR chapter} for calculation details. 
\item ACV2 The average (across all fish) coefficient of variation of ages within a fish using the \bold{median} as the divisor. This will only be shown if R>2 or \code{show.prec2=TRUE}. \item AAD The average (across all fish) absolute deviation of ages within a fish. - \item APE The average (across all fish) percent error of ages within a fish using the \bold{mean} as the divisor. See the \href{http://derekogle.com/IFAR/}{IFAR chapter} for calculation details. + \item APE The average (across all fish) percent error of ages within a fish using the \bold{mean} as the divisor. See the \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{IFAR chapter} for calculation details. \item APE2 The average (across all fish) percent error of ages within a fish using the \bold{median} as the divisor. This will only be shown if R>2 or \code{show.prec2=TRUE}. \item AD The average (across all fish) index of precision (D). } @@ -137,7 +137,7 @@ plot(median~mean,data=ap2$detail,pch=19,col=col2rgbt("black",1/5), } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Beamish, R.J. and D.A. Fournier. 1981. A method for comparing the precision of a set of age determinations. Canadian Journal of Fisheries and Aquatic Sciences 38:982-983. [Was (is?) available from http://www.pac.dfo-mpo.gc.ca/science/people-gens/beamish/PDF_files/compareagecjfas1981.pdf.] @@ -153,7 +153,7 @@ McBride, R.S. 2015. Diagnosis of paired age agreement: A simulation approach of See \code{\link{ageBias}} for computation of the full age agreement table, along with tests and plots of age bias. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} \keyword{manip} diff --git a/man/alkAgeDist.Rd b/man/alkAgeDist.Rd index 25de6ea7..e8183b08 100644 --- a/man/alkAgeDist.Rd +++ b/man/alkAgeDist.Rd @@ -58,7 +58,7 @@ alkAgeDist(WR1.key,lenA.n,len.n) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Lai, H.-L. 1987. Optimum allocation for estimating age composition using age-length key. Fishery Bulletin, 85:179-185. @@ -70,6 +70,6 @@ Quinn, T. J. and R. B. Deriso. 1999. Quantitative Fish Dynamics. Oxford Universi See \code{\link{alkIndivAge}} and related functions for a completely different methodology. See \code{\link[fishmethods]{alkprop}} from \pkg{fishmethods} for the exact same methodology but with a different format for the inputs. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/alkIndivAge.Rd b/man/alkIndivAge.Rd index 6b44a023..b9e3050b 100644 --- a/man/alkIndivAge.Rd +++ b/man/alkIndivAge.Rd @@ -20,7 +20,7 @@ alkIndivAge( \item{data}{A data.frame that minimally contains the length measurements and possibly contains a variable that will receive the age assignments as given in \code{formula}.} -\item{type}{A string that indicates whether to use the semi-random (\code{type="SR"}, default) or completely-random (\code{type="CR"}) methods for assigning ages to individual fish. See the \href{http://derekogle.com/IFAR/}{IFAR chapter} for more details.} +\item{type}{A string that indicates whether to use the semi-random (\code{type="SR"}, default) or completely-random (\code{type="CR"}) methods for assigning ages to individual fish. 
See the \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{IFAR chapter} for more details.} \item{breaks}{A numeric vector of lower values that define the length intervals. See details.} @@ -113,7 +113,7 @@ Summarize(len~age,data=WR3.comb,digits=2) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Isermann, D.A. and C.T. Knight. 2005. A computer program for age-length keys incorporating age assignment to individual fish. North American Journal of Fisheries Management, 25:1153-1160. [Was (is?) from http://www.tandfonline.com/doi/abs/10.1577/M04-130.1.] } @@ -121,6 +121,6 @@ Isermann, D.A. and C.T. Knight. 2005. A computer program for age-length keys inc See \code{\link{alkAgeDist}} and \code{\link{alkMeanVar}} for alternative methods to derived age distributions and mean (and SD) values for each age. See \code{\link{alkPlot}} for methods to visualize age-length keys. } \author{ -Derek H. Ogle, \email{derek@derekogle.com}. This is largely an R version of the SAS code provided by Isermann and Knight (2005). +Derek H. Ogle, \email{DerekOgle51@gmail.com}. This is largely an R version of the SAS code provided by Isermann and Knight (2005). } \keyword{manip} diff --git a/man/alkMeanVar.Rd b/man/alkMeanVar.Rd index e9c12b9e..92fbc894 100644 --- a/man/alkMeanVar.Rd +++ b/man/alkMeanVar.Rd @@ -71,7 +71,7 @@ alkMeanVar(WR1.key,len~LCat+age,WR1.age,len.n,method="QuinnDeriso") } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. 
\href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Bettoli, P. W. and Miranda, L. E. 2001. A cautionary note about estimating mean length at age with subsampled data. North American Journal of Fisheries Management, 21:425-428. @@ -81,6 +81,6 @@ Quinn, T. J. and R. B. Deriso. 1999. Quantitative Fish Dynamics. Oxford Universi See \code{\link{alkIndivAge}} and related functions for a completely different methodology. See \code{\link{alkAgeDist}} for a related method of determining the proportion of fish at each age. See the \pkg{ALKr} package. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/alkPlot.Rd b/man/alkPlot.Rd index 03954ea4..244fd1e3 100644 --- a/man/alkPlot.Rd +++ b/man/alkPlot.Rd @@ -95,12 +95,12 @@ alkPlot(WR.key,"bubble",col=col2rgbt("black",0.5)) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{alkIndivAge}} for using an age-length key to assign ages to individual fish. See \code{\link[grDevices]{hcl.colors}} for a simple way to choose other colors. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{plot} diff --git a/man/binCI.Rd b/man/binCI.Rd index 55933fd2..3a2fd4a4 100644 --- a/man/binCI.Rd +++ b/man/binCI.Rd @@ -66,6 +66,6 @@ Agresti, A. and B.A. Coull. 1998. Approximate is better than \dQuote{exact} for See \code{\link{binom.test}}; \code{binconf} in \pkg{Hmisc}; and functions in \pkg{binom}. } \author{ -Derek H. 
Ogle, \email{derek@derekogle.com}, though this is largely based on \code{binom.exact}, \code{binom.wilson}, and \code{binom.approx} from the old epitools package. +Derek H. Ogle, \email{DerekOgle51@gmail.com}, though this is largely based on \code{binom.exact}, \code{binom.wilson}, and \code{binom.approx} from the old epitools package. } \keyword{htest} diff --git a/man/boot.Rd b/man/boot.Rd index e85a86a3..ce34f13d 100644 --- a/man/boot.Rd +++ b/man/boot.Rd @@ -134,6 +134,6 @@ S. Weisberg (2005). \emph{Applied Linear Regression}, third edition. New York: W \code{\link[car]{Boot}} in \pkg{car}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} diff --git a/man/capFirst.Rd b/man/capFirst.Rd index 73e8c0f1..140e618c 100644 --- a/man/capFirst.Rd +++ b/man/capFirst.Rd @@ -42,6 +42,6 @@ class(fvec1) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/capHistConvert.Rd b/man/capHistConvert.Rd index da027795..df180cec 100644 --- a/man/capHistConvert.Rd +++ b/man/capHistConvert.Rd @@ -300,12 +300,12 @@ capHistConvert(df1,id="fish",in.type="event") } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{capHistSum}} to summarize \dQuote{individual} capture histories into a format usable in \code{\link{mrClosed}} and \code{\link{mrOpen}}. Also see \pkg{Rcapture}, \code{RMark}, or \pkg{marked} packages for handling more complex analyses. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/capHistSum.Rd b/man/capHistSum.Rd index 8fbdcc3e..e2c63e09 100644 --- a/man/capHistSum.Rd +++ b/man/capHistSum.Rd @@ -85,7 +85,7 @@ plot(ch1,what="u") } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Baillargeon, S. and Rivest, L.-P. (2007). Rcapture: Loglinear models for capture-recapture in R. Journal of Statistical Software, 19(5):1-31. } @@ -93,6 +93,6 @@ Baillargeon, S. and Rivest, L.-P. (2007). Rcapture: Loglinear models for capture See \code{\link[Rcapture]{descriptive}} in \pkg{Rcapture} for \code{m.array} and some of the same values in \code{sum}. See \code{\link{capHistConvert}} for a descriptions of capture history data file formats and how to convert between them. See \code{\link{mrClosed}} and \code{\link{mrOpen}} for how to estimate abundance from the summarized capture history information. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/catchCurve.Rd b/man/catchCurve.Rd index 1f6f93ae..f3cb8fb3 100644 --- a/man/catchCurve.Rd +++ b/man/catchCurve.Rd @@ -180,7 +180,7 @@ plot(cc5) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Maceina, M.J., and P.W. Bettoli. 1998. Variation in Largemouth Bass recruitment in four mainstream impoundments on the Tennessee River. North American Journal of Fisheries Management 18:998-1003. 
@@ -190,7 +190,7 @@ Ricker, W.E. 1975. Computation and interpretation of biological statistics of fi See \code{\link[fishmethods]{agesurv}} in \pkg{fishmethods} for similar functionality. See \code{\link{chapmanRobson}} and \code{\link[fishmethods]{agesurvcl}} in \pkg{fishmethods} for alternative methods to estimate mortality rates. See \code{\link{metaM}} for empirical methods to estimate natural mortality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} \keyword{htest} diff --git a/man/chapmanRobson.Rd b/man/chapmanRobson.Rd index 92e6850a..e71a760f 100644 --- a/man/chapmanRobson.Rd +++ b/man/chapmanRobson.Rd @@ -151,7 +151,7 @@ plot(cr3) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Chapman, D.G. and D.S. Robson. 1960. The analysis of a catch curve. Biometrics. 16:354-368. @@ -167,7 +167,7 @@ Smith, M.W., A.Y. Then, C. Wor, G. Ralph, K.H. Pollock, and J.M. Hoenig. 2012. R See \code{\link[fishmethods]{agesurv}} in \pkg{fishmethods} for similar functionality. See \code{\link{catchCurve}} and \code{\link[fishmethods]{agesurvcl}} in \pkg{fishmethods} for alternative methods. See \code{\link{metaM}} for empirical methods to estimate natural mortality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} \keyword{manip} diff --git a/man/col2rgbt.Rd b/man/col2rgbt.Rd index c7e805d7..b795da61 100644 --- a/man/col2rgbt.Rd +++ b/man/col2rgbt.Rd @@ -31,6 +31,6 @@ col2rgbt(clrs,trans) See \code{\link[grDevices]{col2rgb}} for similar functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/depletion.Rd b/man/depletion.Rd index 1cf4c032..263e2596 100644 --- a/man/depletion.Rd +++ b/man/depletion.Rd @@ -167,7 +167,7 @@ plot(d2) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Ricker, W.E. 1975. Computation and interpretation of biological statistics of fish populations. Technical Report Bulletin 191, Bulletin of the Fisheries Research Board of Canada. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/1485.pdf.] @@ -177,7 +177,7 @@ Seber, G.A.F. 2002. The Estimation of Animal Abundance. Edward Arnold, Second ed See \code{\link{removal}} for related functionality and \code{\link[fishmethods]{deplet}} in \pkg{fishmethods} for similar functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} \keyword{manip} diff --git a/man/dunnTest.Rd b/man/dunnTest.Rd index 811e2af3..73a0b373 100644 --- a/man/dunnTest.Rd +++ b/man/dunnTest.Rd @@ -116,5 +116,5 @@ Dunn, O.J. 1964. Multiple comparisons using rank sums. Technometrics 6:241-252. See \code{kruskal.test}, \code{\link[dunn.test]{dunn.test}} in \pkg{dunn.test}, \code{posthoc.kruskal.nemenyi.test} in \pkg{PMCMR}, \code{kruskalmc} in \pkg{pgirmess}, and \code{kruskal} in \pkg{agricolae}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com}, but this is largely a wrapper (see details) for \code{\link[dunn.test]{dunn.test}} in \pkg{dunn.test} written by Alexis Dinno. +Derek H. Ogle, \email{DerekOgle51@gmail.com}, but this is largely a wrapper (see details) for \code{\link[dunn.test]{dunn.test}} in \pkg{dunn.test} written by Alexis Dinno. 
} diff --git a/man/expandCounts.Rd b/man/expandCounts.Rd index cbaebd3f..c6a5c806 100644 --- a/man/expandCounts.Rd +++ b/man/expandCounts.Rd @@ -129,6 +129,6 @@ df3 <- expandCounts(df1,~Number.of.Fish,~Length.or.Lower.Length.IN+Length.Upper. See \code{\link{expandLenFreq}} for expanding length frequencies where individual fish measurements were made on individual fish in a subsample and the remaining fish were simply counted. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/expandLenFreq.Rd b/man/expandLenFreq.Rd index 96c4048b..56a8ce77 100644 --- a/man/expandLenFreq.Rd +++ b/man/expandLenFreq.Rd @@ -81,6 +81,6 @@ newlen2 See \code{\link{expandCounts}} for expanding more than just lengths or expanding lengths when there is a known number in each length bin. See \code{\link{lencat}} for creating length bins. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/extraTests.Rd b/man/extraTests.Rd index 50a033d5..0e989327 100644 --- a/man/extraTests.Rd +++ b/man/extraTests.Rd @@ -102,6 +102,6 @@ lrt(fit.0,fit.1,com=fit.2) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} diff --git a/man/fact2num.Rd b/man/fact2num.Rd index 14d334c9..54668a74 100644 --- a/man/fact2num.Rd +++ b/man/fact2num.Rd @@ -32,6 +32,6 @@ bad2 <- fact2num(bad) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/fishR.Rd b/man/fishR.Rd index cfeadbfd..d9146a39 100644 --- a/man/fishR.Rd +++ b/man/fishR.Rd @@ -5,7 +5,7 @@ \title{Opens web pages associated with the fishR website.} \usage{ fishR( - where = c("home", "IFAR", "general", "books", "AIFFD", "posts", "news"), + where = c("home", "posts", "books", "IFAR", "AIFFD", "packages", "data"), open = TRUE ) } @@ -18,21 +18,22 @@ fishR( None, but a webpage will be opened in the default browser. } \description{ -Opens web pages associated with the \href{https://derekogle.com/fishR/}{fishR website} in a browser. The user can open the main page or choose a specific page to open. +Opens web pages associated with the \href{https://fishr-core-team.github.io/fishR/}{fishR website} in a browser. The user can open the main page or choose a specific page to open. } \examples{ \dontrun{ ## Opens an external webpage ... only run interactively fishR() # home page -fishR("IFAR") # Introduction to Fisheries Analysis with R page -fishR("general") # examples page +fishR("posts") # blog posts (some examples) page fishR("books") # examples page +fishR("IFAR") # Introduction to Fisheries Analysis with R page fishR("AIFFD") # Analysis & Interpretation of Freshw. Fisher. Data page -fishR("posts") # blog posts (some examples) page +fishR("packages") # list of R-related fisheries packages +fishR("data") # list of fisheries data sets } } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{misc} diff --git a/man/growthModels.Rd b/man/growthModels.Rd index 3071ad16..03f82d0c 100644 --- a/man/growthModels.Rd +++ b/man/growthModels.Rd @@ -286,7 +286,7 @@ text(0.5,0.5,growthFunShow("vonBertalanffy","Francis")) par(op) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016.
\href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Campana, S.E. and C.M. Jones. 1992. Analysis of otolith microstructure data. Pages 73-100 In D.K. Stevenson and S.E. Campana, editors. Otolith microstructure examination and analysis. Canadian Special Publication of Fisheries and Aquatic Sciences 117. [Was (is?) from http://www.dfo-mpo.gc.ca/Library/141734.pdf.] @@ -340,7 +340,7 @@ Winsor, C.P. 1932. The Gompertz curve as a growth curve. Proceedings of the Nati See \code{\link{Schnute}} for an implementation of the Schnute (1981) model. } \author{ -Derek H. Ogle, \email{derek@derekogle.com}, thanks to Gabor Grothendieck for a hint about using \code{get()}. +Derek H. Ogle, \email{DerekOgle51@gmail.com}, thanks to Gabor Grothendieck for a hint about using \code{get()}. } \keyword{hplot} \keyword{manip} diff --git a/man/headtail.Rd b/man/headtail.Rd index 7118a9fd..bf80e2ff 100644 --- a/man/headtail.Rd +++ b/man/headtail.Rd @@ -54,6 +54,6 @@ if (require(tibble)) { \code{peek} } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/hist.formula.Rd b/man/hist.formula.Rd index df815288..16ec61cc 100644 --- a/man/hist.formula.Rd +++ b/man/hist.formula.Rd @@ -108,12 +108,12 @@ hist(Sepal.Length~Species,data=iris,xlab="Sepal Length (cm)",w=0.25) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See base \code{\link[graphics]{hist}} for related functionality and \code{\link[plotrix]{multhist}} in \pkg{plotrix} for similar functionality. } \author{ -Derek H. 
Ogle, \email{derek@derekogle.com}, but this implementation is largely a modification of the code provided by Marc Schwartz on the R-help mailing list on 1Jun07. +Derek H. Ogle, \email{DerekOgle51@gmail.com}, but this implementation is largely a modification of the code provided by Marc Schwartz on the R-help mailing list on 1Jun07. } \keyword{hplot} diff --git a/man/histFromSum.Rd b/man/histFromSum.Rd index 7c37c409..785e6d9a 100644 --- a/man/histFromSum.Rd +++ b/man/histFromSum.Rd @@ -74,6 +74,6 @@ histFromSum(Freq~lcat10,data=df2,breaks=brks,xlab=xlbl,col="gray55") See \code{\link[graphics]{hist}} and \code{\link{hist.formula}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} diff --git a/man/hyperCI.Rd b/man/hyperCI.Rd index 81dbb37e..4a94717d 100644 --- a/man/hyperCI.Rd +++ b/man/hyperCI.Rd @@ -32,6 +32,6 @@ hyperCI(50,25,10) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} diff --git a/man/is.odd.Rd b/man/is.odd.Rd index 2527a9b7..01f9e93d 100644 --- a/man/is.odd.Rd +++ b/man/is.odd.Rd @@ -30,6 +30,6 @@ d <- 1:8 data.frame(d,odd=is.odd(d),even=is.even(d)) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/knitUtil.Rd b/man/knitUtil.Rd index ecac75f3..73694e2c 100644 --- a/man/knitUtil.Rd +++ b/man/knitUtil.Rd @@ -122,6 +122,6 @@ kPvalue(0.000012345,include.p=FALSE,latex=FALSE) See \code{\link{formatC}} for functionality similar to \code{kPvalue}. See \code{purl} and \code{\link[knitr]{knit}} in \pkg{knitr} for functionality similar to \code{purl2}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/ksTest.Rd b/man/ksTest.Rd index bddf031a..23a20d4e 100644 --- a/man/ksTest.Rd +++ b/man/ksTest.Rd @@ -70,6 +70,6 @@ ksTest(dat~grp,data=df) \code{\link[stats]{ks.test}}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} diff --git a/man/lagratio.Rd b/man/lagratio.Rd index 993c5785..0f4c38d2 100644 --- a/man/lagratio.Rd +++ b/man/lagratio.Rd @@ -57,6 +57,6 @@ lagratio(10:1,2,2,direction="forward") \code{diff} } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/lencat.Rd b/man/lencat.Rd index d4b8a560..3190c383 100644 --- a/man/lencat.Rd +++ b/man/lencat.Rd @@ -215,9 +215,9 @@ str(smb2) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/logbtcf.Rd b/man/logbtcf.Rd index 4b8fa9d8..ff604377 100644 --- a/man/logbtcf.Rd +++ b/man/logbtcf.Rd @@ -48,6 +48,6 @@ all.equal(cpe,cp10) Sprugel, D.G. 1983. Correcting for bias in log-transformed allometric equations. Ecology 64:209-210. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/lwCompPreds.Rd b/man/lwCompPreds.Rd index bd730cce..ff27f0ad 100644 --- a/man/lwCompPreds.Rd +++ b/man/lwCompPreds.Rd @@ -105,9 +105,9 @@ par(op) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. 
\href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/metaM.Rd b/man/metaM.Rd index c494a020..6526c838 100644 --- a/man/metaM.Rd +++ b/man/metaM.Rd @@ -142,7 +142,7 @@ metaM(Mmethods("Hoenig"),K=K,Linf=Linf,Temp=Temp,tmax=tmax,t50=t50) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Alverson, D.L. and M.J. Carney. 1975. A graphic review of the growth and decay of population cohorts. Journal du Conseil International pour l'Exploration de la Mer. 36:133-143. @@ -172,6 +172,6 @@ Zhang, C-I and B.A. Megrey. 2006. A revised Alverson and Carney model for estima See \code{\link[fishmethods]{M.empirical}} in \pkg{fishmethods} for similar functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/mrClosed.Rd b/man/mrClosed.Rd index eab0bbfd..bce2d954 100644 --- a/man/mrClosed.Rd +++ b/man/mrClosed.Rd @@ -264,7 +264,7 @@ confint(mr7) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Krebs, C.J. 1989. Ecological Methodology. Addison-Welsey Educational Publishing. @@ -280,6 +280,6 @@ Schumacher, F.X. and R.W. Eschmeyer. 1943. 
The estimation of fish populations in See \code{\link{capHistSum}} for generating input data from capture histories. See \code{\link{poiCI}}, \code{\link{binCI}}, and \code{\link{hyperCI}} for specifics on functions used in confidence interval construction. See \code{\link{mrOpen}} for handling mark-recapture data in an open population. See \code{\link[FSAdata]{SunfishIN}} in \pkg{FSAdata} for an example to test matching of results with Ricker (1975)' See \code{\link[fishmethods]{mrN.single}} and \code{\link[fishmethods]{schnabel}} in \pkg{fishmethods} for similar functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/mrOpen.Rd b/man/mrOpen.Rd index d1310e70..7dfe13eb 100644 --- a/man/mrOpen.Rd +++ b/man/mrOpen.Rd @@ -138,7 +138,7 @@ ex3a <- jolly(jolly.top,jolly.bot) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Jolly, G.M. 1965. Explicit estimates from capture-recapture data with both death and immigration -- stochastic model. Biometrika, 52:225-247. @@ -158,6 +158,6 @@ Seber, G.A.F. 2002. The Estimation of Animal Abundance. Edward Arnold, second ed \code{\link{capHistSum}}, \code{\link{mrClosed}} } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/nlsBoot.Rd b/man/nlsBoot.Rd index 98425465..ed5575ac 100644 --- a/man/nlsBoot.Rd +++ b/man/nlsBoot.Rd @@ -107,6 +107,6 @@ if (require(nlstools)) { \code{\link[car]{Boot}} and related methods in \pkg{car} and \code{summary.\link[nlstools]{nlsBoot}} in \pkg{nlstools}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} diff --git a/man/nlsTracePlot.Rd b/man/nlsTracePlot.Rd index 15a69870..51408c34 100644 --- a/man/nlsTracePlot.Rd +++ b/man/nlsTracePlot.Rd @@ -96,6 +96,6 @@ if (require(FSAdata)) { } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{plot} diff --git a/man/peek.Rd b/man/peek.Rd index 302e9984..b66356ca 100644 --- a/man/peek.Rd +++ b/man/peek.Rd @@ -51,7 +51,7 @@ if (require(dplyr)) { \code{headtail} } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} A. Powell Wheeler, \email{powell.wheeler@gmail.com} } diff --git a/man/perc.Rd b/man/perc.Rd index 23aeb7e2..ffce9f3e 100644 --- a/man/perc.Rd +++ b/man/perc.Rd @@ -50,6 +50,6 @@ perc(tmp,5,"lt",na.rm=FALSE) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{misc} diff --git a/man/plotAB.Rd b/man/plotAB.Rd index 79638054..76e0d340 100644 --- a/man/plotAB.Rd +++ b/man/plotAB.Rd @@ -120,7 +120,7 @@ Campana, S.E., M.C. Annand, and J.I. McMillan. 1995. Graphical and statistical m See \code{\link{ageBias}} and its plot method for what I consider a better age-bias plot; \code{\link{agePrecision}} for measures of precision between pairs of age estimates; and \code{\link[fishmethods]{compare2}} in \pkg{fishmethods} for similar functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{htest} \keyword{manip} diff --git a/man/poiCI.Rd b/man/poiCI.Rd index fbb80879..0dbc13be 100644 --- a/man/poiCI.Rd +++ b/man/poiCI.Rd @@ -49,6 +49,6 @@ poiCI(c(7,10),type="exact",verbose=TRUE) } \author{ -Derek H. Ogle, \email{derek@derekogle.com}, though this is largely based on \code{pois.exact}, \code{pois.daly}, \code{pois.byar}, and \code{pois.approx} from the old epitools package. +Derek H. 
Ogle, \email{DerekOgle51@gmail.com}, though this is largely based on \code{pois.exact}, \code{pois.daly}, \code{pois.byar}, and \code{pois.approx} from the old epitools package. } \keyword{htest} diff --git a/man/psdAdd.Rd b/man/psdAdd.Rd index e2f29a83..f551e38f 100644 --- a/man/psdAdd.Rd +++ b/man/psdAdd.Rd @@ -113,7 +113,7 @@ df } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] @@ -125,6 +125,6 @@ Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: developmen \code{\link{psdVal}}, \code{\link{psdCalc}}, \code{\link{psdPlot}}, \code{\link{PSDlit}}, and \code{\link{wrAdd}} for related functions. See \code{\link[plyr]{mapvalues}} for help in changing species names to match those in \code{\link{PSDlit}}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/psdCI.Rd b/man/psdCI.Rd index 2d0874fd..15bfebd7 100644 --- a/man/psdCI.Rd +++ b/man/psdCI.Rd @@ -88,7 +88,7 @@ bcis } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Brenden, T.O., T. Wagner, and B.R. Murphy. 2008. Novel tools for analyzing proportional size distribution index data. 
North American Journal of Fisheries Management 28:1233-1242. [Was (is?) from http://qfc.fw.msu.edu/Publications/Publication\%20List/2008/Novel\%20Tools\%20for\%20Analyzing\%20Proportional\%20Size\%20Distribution_Brenden.pdf.] } @@ -96,6 +96,6 @@ Brenden, T.O., T. Wagner, and B.R. Murphy. 2008. Novel tools for analyzing propo See \code{\link{psdVal}}, \code{\link{psdPlot}}, \code{\link{psdAdd}}, \code{\link{PSDlit}}, \code{\link{tictactoe}}, \code{\link{lencat}}, and \code{\link{rcumsum}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} diff --git a/man/psdCalc.Rd b/man/psdCalc.Rd index 5b809e9c..13704c34 100644 --- a/man/psdCalc.Rd +++ b/man/psdCalc.Rd @@ -109,7 +109,7 @@ psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}Chapman & Hall/CRC, Boca Raton, FL. Guy, C.S., R.M. Neumann, and D.W. Willis2006New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS)Fisheries 31:86-87 [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] @@ -123,6 +123,6 @@ Willis, D.W., B.R. Murphy, and C.S. Guy1993Stock density indices: development, u See \code{\link{psdVal}}, \code{\link{psdPlot}}, \code{\link{psdAdd}}, \code{\link{PSDlit}}, \code{\link{tictactoe}}, \code{\link{lencat}}, and \code{\link{rcumsum}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} diff --git a/man/psdPlot.Rd b/man/psdPlot.Rd index 4f0219c5..dcd5693f 100644 --- a/man/psdPlot.Rd +++ b/man/psdPlot.Rd @@ -108,7 +108,7 @@ par(op) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] @@ -120,6 +120,6 @@ Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: developmen See \code{\link{psdVal}}, \code{\link{psdCalc}}, \code{\link{psdAdd}}, \code{\link{PSDlit}}, \code{\link{lencat}}, \code{\link{tictactoe}}, \code{\link{lencat}}, and \code{\link{rcumsum}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} diff --git a/man/psdVal.Rd b/man/psdVal.Rd index 6104668c..50b3125c 100644 --- a/man/psdVal.Rd +++ b/man/psdVal.Rd @@ -65,7 +65,7 @@ psdVal("Bluegill",showJustSource=TRUE) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] 
@@ -77,6 +77,6 @@ Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: developmen See \code{\link{psdCalc}}, \code{\link{psdPlot}}, \code{\link{psdAdd}}, \code{\link{PSDlit}}, \code{\link{tictactoe}}, \code{\link{lencat}}, and \code{\link{rcumsum}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/rcumsum.Rd b/man/rcumsum.Rd index 1d9a0859..4b7a9294 100644 --- a/man/rcumsum.Rd +++ b/man/rcumsum.Rd @@ -66,6 +66,6 @@ rcumsum(tbl) \code{\link{cumsum}}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{misc} diff --git a/man/removal.Rd b/man/removal.Rd index 87a84645..ec1d080a 100644 --- a/man/removal.Rd +++ b/man/removal.Rd @@ -226,7 +226,7 @@ fnl1 } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Carle, F.L. and M.R. Strub. 1978. A new method for estimating population size from removal data. Biometrics, 34:621-630. @@ -250,7 +250,7 @@ Van Deventer, J.S., and W.S. Platts. 1983. Sampling and estimating fish populati See \code{\link{depletion}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} A. Powell Wheeler, \email{powell.wheeler@gmail.com} } diff --git a/man/repeatedRows2Keep.Rd b/man/repeatedRows2Keep.Rd index 31ae87f1..77cc9256 100644 --- a/man/repeatedRows2Keep.Rd +++ b/man/repeatedRows2Keep.Rd @@ -41,6 +41,6 @@ droplevels(subset(test1,keepLast)) # should be all "Last" or "Both" (7 items) } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/se.Rd b/man/se.Rd index 9659a586..a72bf485 100644 --- a/man/se.Rd +++ b/man/se.Rd @@ -36,6 +36,6 @@ se(x2) See \code{se} in \pkg{sciplot} for similar functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/srFuns.Rd b/man/srFuns.Rd index a2cc0105..2205c66b 100644 --- a/man/srFuns.Rd +++ b/man/srFuns.Rd @@ -111,7 +111,7 @@ text(0.5,0.5,srFunShow("Shepherd")) par(op) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. Beverton, R.J.H. and S.J. Holt. 1957. On the dynamics of exploited fish populations, Fisheries Investigations (Series 2), volume 19. United Kingdom Ministry of Agriculture and Fisheries, 533 pp. @@ -129,6 +129,6 @@ Shepherd, J. 1982. A versatile new stock-recruitment relationship for fisheries See \code{\link{srStarts}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com}, thanks to Gabor Grothendieck for a hint about using \code{get()}. +Derek H. Ogle, \email{DerekOgle51@gmail.com}, thanks to Gabor Grothendieck for a hint about using \code{get()}. } \keyword{manip} diff --git a/man/srStarts.Rd b/man/srStarts.Rd index 023d5454..904c7e27 100644 --- a/man/srStarts.Rd +++ b/man/srStarts.Rd @@ -92,7 +92,7 @@ srStarts(recruits~stock,data=CodNorwegian,type="independence",plot=TRUE) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. 
Chapman & Hall/CRC, Boca Raton, FL. Beverton, R.J.H. and S.J. Holt. 1957. On the dynamics of exploited fish populations, Fisheries Investigations (Series 2), volume 19. United Kingdom Ministry of Agriculture and Fisheries, 533 pp. @@ -110,6 +110,6 @@ Shepherd, J. 1982. A versatile new stock-recruitment relationship for fisheries See \code{\link{srFunShow}} and \code{\link{srFuns}} for related functionality. See \code{\link{nlsTracePlot}} for help troubleshooting nonlinear models that don't converge. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/sumTable.Rd b/man/sumTable.Rd index 589f008a..ea349e4c 100644 --- a/man/sumTable.Rd +++ b/man/sumTable.Rd @@ -58,6 +58,6 @@ sumTable(dat~g1,data=d,FUN=mean,na.rm=TRUE) See \code{\link[base]{tapply}} for a more general implementation. See \code{\link{Summarize}} for a similar computation when only one factor variable is given. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} diff --git a/man/tictactoe.Rd b/man/tictactoe.Rd index 2fe3eb7d..c3e8d520 100644 --- a/man/tictactoe.Rd +++ b/man/tictactoe.Rd @@ -83,12 +83,12 @@ text(prey[,1],pred[,1],labels=c(2010,2011,2012),adj=c(-0.5,-0.5)) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{psdVal}} and \code{\link{psdCalc}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{hplot} diff --git a/man/validn.Rd b/man/validn.Rd index 1af32f20..6ece0075 100644 --- a/man/validn.Rd +++ b/man/validn.Rd @@ -39,6 +39,6 @@ validn(junk6) See \code{\link[plotrix]{valid.n}} in \pkg{plotrix} and \code{nobs} in \pkg{gdata} for similar functionality. See \code{\link{is.na}} for finding the missing values. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/vbStarts.Rd b/man/vbStarts.Rd index f90b03fc..c0a42216 100644 --- a/man/vbStarts.Rd +++ b/man/vbStarts.Rd @@ -152,7 +152,7 @@ vbStarts(tl~age,data=SpotVA1,type="Ogle",valOgle=c(tr=2),fixed=list(Lr=10),plot= } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. See references in \code{\link{vbFuns}}. } @@ -160,6 +160,6 @@ See references in \code{\link{vbFuns}}. See \code{\link{growthFunShow}} to display the equations for the parameterizations used in \pkg{FSA} and \code{\link{vbFuns}} for functions that represent the von Bertalanffy parameterizations. See \code{\link{nlsTracePlot}} for help troubleshooting nonlinear models that don't converge. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/wrAdd.Rd b/man/wrAdd.Rd index a4425dfa..bf9a0951 100644 --- a/man/wrAdd.Rd +++ b/man/wrAdd.Rd @@ -71,12 +71,12 @@ bg$Wr4 <- wrAdd(wt~tl+species,data=bg) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. 
\href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{wsVal}}, \code{\link{WSlit}}, and \code{\link{psdAdd}} for related functionality. See \code{\link[plyr]{mapvalues}} for help in changing species names to match those in \code{\link{WSlit}}. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/man/wsVal.Rd b/man/wsVal.Rd index 53c61428..6f6dbafd 100644 --- a/man/wsVal.Rd +++ b/man/wsVal.Rd @@ -52,12 +52,12 @@ wsVal("Ruffe",units="metric",ref=50,simplify=TRUE) } \references{ -Ogle, D.H. 2016. \href{http://derekogle.com/IFAR/}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. +Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. } \seealso{ See \code{\link{wrAdd}} and \code{\link{WSlit}} for related functionality. } \author{ -Derek H. Ogle, \email{derek@derekogle.com} +Derek H. 
Ogle, \email{DerekOgle51@gmail.com} } \keyword{manip} diff --git a/tests/testthat/testthat_FSAUtils.R b/tests/testthat/testthat_FSAUtils.R index e0b9b755..86463b04 100644 --- a/tests/testthat/testthat_FSAUtils.R +++ b/tests/testthat/testthat_FSAUtils.R @@ -177,19 +177,19 @@ test_that("fact2num() results",{ test_that("fishR() return values",{ expect_equal(fishR(open=FALSE), - "https://derekogle.com/fishR") - expect_equal(fishR("IFAR",open=FALSE), - "https://derekogle.com/IFAR") - expect_equal(fishR("general",open=FALSE), - "https://derekogle.com/fishR/examples") - expect_equal(fishR("AIFFD",open=FALSE), - "https://derekogle.com/aiffd2007") + "https://fishr-core-team.github.io/fishR/") expect_equal(fishR("posts",open=FALSE), - "https://derekogle.com/fishR/blog") + "https://fishr-core-team.github.io/fishR/blog/") expect_equal(fishR("books",open=FALSE), - "https://derekogle.com/fishR/examples") - expect_equal(fishR("news",open=FALSE), - "https://derekogle.com/fishR/blog") + "https://fishr-core-team.github.io/fishR/pages/books.html") + expect_equal(fishR("IFAR",open=FALSE), + "https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r") + expect_equal(fishR("AIFFD",open=FALSE), + "https://fishr-core-team.github.io/fishR/pages/books.html#analysis-and-interpretation-of-freshwater-fisheries-data-i") + expect_equal(fishR("packages",open=FALSE), + "https://fishr-core-team.github.io/fishR/pages/packages.html") + expect_equal(fishR("data",open=FALSE), + "https://fishr-core-team.github.io/fishR/pages/data_fishR_alpha.html") }) test_that("geomean() / geosd() results",{ From 972b0b84dfaf779f959ed6c3fcc50a20e6baa87c Mon Sep 17 00:00:00 2001 From: Derek Ogle Date: Tue, 20 Dec 2022 14:23:47 -0600 Subject: [PATCH 2/8] Clarified acronyms in wSlit and italicized et al.s --- NEWS.md | 1 + R/WSlit.R | 22 ++++++++++++++-------- R/chapmanRobson.R | 4 ++-- R/growthModels.R | 2 +- R/metaM.R | 18 +++++++++--------- R/mrOpen.R | 6 +++--- R/psdCI.R | 4 ++-- 
man/WSlit.Rd | 22 ++++++++++++++-------- man/chapmanRobson.Rd | 4 ++-- man/growthModels.Rd | 2 +- man/metaM.Rd | 18 +++++++++--------- man/mrOpen.Rd | 6 +++--- man/psdCI.Rd | 4 ++-- 13 files changed, 63 insertions(+), 50 deletions(-) diff --git a/NEWS.md b/NEWS.md index 655daea7..2dcbe6b8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -13,6 +13,7 @@ * `alkIndivAge()`: Modified. Added a catch for `NA`s in the length sample. Also added a test. This addresses [#88](https://github.com/fishR-Core-Team/FSA/issues/88). * `confint.boot()`: Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by `grep()`ing for the `%` sign. This fixes an issue related to `car::Confint()` returning the `coef()` results for functions that have a `coef()` method but not for those that do not. Also updated tests to use results from `car::Boot()` rather than the old `car::bootCase()`. +* `wSlit`: Modified documentation. Described the `RLP` and `EmP` acronyms and provided references for them. This addresses [#95](https://github.com/fishR-Core-Team/FSA/issues/95)). # FSA 0.9.3 * Moved `dplyr` from `imports` to `suggests` (needed because functions were removed in last version; however it is still used in some examples; partially addresses [#87](https://github.com/fishR-Core-Team/FSA/issues/87)). diff --git a/R/WSlit.R b/R/WSlit.R index 6255517f..814d9218 100644 --- a/R/WSlit.R +++ b/R/WSlit.R @@ -10,18 +10,18 @@ #' #' @format A data frame with observations on the following 13 variables: #' \describe{ -#' \item{species}{Species name.} +#' \item{species}{Species name. Use \code{wsVal()} to see the list of available species.} #' \item{units}{Units of measurements. \code{Metric} uses lengths in mm and weight in grams. 
\code{English} uses lengths in inches and weight in pounds.} #' \item{type}{Type of equation (\code{linear} or \code{quadratic}).} #' \item{ref}{Reference quartile (\code{75}, \code{50}, or \code{25}).} #' \item{measure}{The type of length measurement used -- total length (\code{TL}) or fork length (\code{FL}).} -#' \item{method}{The type of method used to derive the equation (\code{RLP},\code{EmP}, or \code{Other}).} +#' \item{method}{The type of method used to derive the equation (Regression Line Percentile (\code{RLP}; see Murphy \emph{et al.} (1990) and Murphy \emph{et al.} (1991)), Empirical Percentile (\code{EmP}; see Gerow \emph{et al.} (2005)), or \code{Other}).} #' \item{min.len}{Minimum total length (mm or in, depending on \code{units}) for which the equation should be applied.} #' \item{max.len}{Maximum total length (mm or in, depending on \code{units}) for which the equation should be applied.} #' \item{int}{The intercept for the model.} -#' \item{slope}{The slope for the linear models or the linear coefficient for the quadratic equation.} -#' \item{quad}{The quadratic coefficient in the quadratic equations.} -#' \item{source}{Source of the equation. These match the sources given in Neumann et al. 2012.} +#' \item{slope}{The slope for the linear equation or the linear coefficient for the quadratic equation.} +#' \item{quad}{The quadratic coefficient in the quadratic equation.} +#' \item{source}{Source of the equation. These match the sources given in Neumann \emph{et al.} (2012).} #' \item{comment}{Comments about use of equation.} #' } #' @@ -41,10 +41,16 @@ #' @section IFAR Chapter: 8-Condition. #' #' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. -#' -#' @source Most of these equations can be found in Neumann, R.M., C.S. Guy, and D.W. Willis. 2012. Length, Weight, and Associated Indices. 
Chapter 14 in Zale, A.V., D.L. Parrish, and T.M. Sutton, editors. Fisheries Techniques. American Fisheries Society, Bethesda, MD. #' -#' Some species were not in Neumann et al (2012) and are noted as such in the \code{comments} variable. +#' Gerow, K.G., R.C. Anderson-Sprecher, and W.A. Hubert. 2005. A new method to compute standard weight equations that reduces length-related bias. North American Journal of Fisheries Management 25:1288–1300. +#' +#' Murphy, B.R., M.L. Brown, and T.A. Springer. 1990. Evaluation of the relative weight (Wr) index, with new applications to walleye. North American Journal of Fisheries Management 10:85–97. +#' +#' Murphy, B. R., D. W. Willis, and T. A. Springer. 1991. The relative weight index in fisheries management: Status and needs. Fisheries (Bethesda) 16(2):30–38. +#' +#' Neumann, R.M., C.S. Guy, and D.W. Willis. 2012. Length, Weight, and Associated Indices. Chapter 14 in Zale, A.V., D.L. Parrish, and T.M. Sutton, editors. Fisheries Techniques. American Fisheries Society, Bethesda, MD. +#' +#' @source Most of these equations can be found in Neumann \emph{et al.} (2012). Species not in Neumann \emph{et al.} (2012) are noted as such in the \code{comments} variable. #' #' @keywords datasets #' diff --git a/R/chapmanRobson.R b/R/chapmanRobson.R index c5097e4f..c375e0b4 100644 --- a/R/chapmanRobson.R +++ b/R/chapmanRobson.R @@ -4,7 +4,7 @@ #' #' @details The default is to use all ages in the age vector. This is only appropriate if the age and catch vectors contain only the ages and catches on the descending limb of the catch curve. Use \code{ages2use} to isolate only the catch and ages on the descending limb. #' -#' The Chapman-Robson method provides an estimate of the annual survival rate, with the annual mortality rate (A) determined by 1-S. The instantaneous mortality rate is often computed as -log(S). However, Hoenig et al. (1983) showed that this produced a biased (over)estimate of Z and provided a correction. 
The correction is applied by setting \code{zmethod="Hoenigetal"}. Smith et al. (2012) showed that the Hoenig et al. method should be corrected for a variance inflation factor. This correction is applied by setting \code{zmethod="Smithetal"} (which is the default behavior). Choose \code{zmethod="original"} to use the original estimates for Z and it's SE as provided by Chapman and Robson. +#' The Chapman-Robson method provides an estimate of the annual survival rate, with the annual mortality rate (A) determined by 1-S. The instantaneous mortality rate is often computed as -log(S). However, Hoenig \emph{et al.} (1983) showed that this produced a biased (over)estimate of Z and provided a correction. The correction is applied by setting \code{zmethod="Hoenigetal"}. Smith \emph{et al.} (2012) showed that the Hoenig \emph{et al.} method should be corrected for a variance inflation factor. This correction is applied by setting \code{zmethod="Smithetal"} (which is the default behavior). Choose \code{zmethod="original"} to use the original estimates for Z and it's SE as provided by Chapman and Robson. #' #' @param x A numerical vector of the assigned ages in the catch curve or a formula of the form \code{catch~age} when used in \code{chapmanRobson}. An object saved from \code{chapmanRobson} (i.e., of class \code{chapmanRobson}) when used in the methods. #' @param object An object saved from the \code{chapmanRobson} call (i.e., of class \code{chapmanRobson}). @@ -40,7 +40,7 @@ #' #' @section Testing: Tested the results of chapmanRobson against the results in Miranda and Bettoli (2007). The point estimates of S matched perfectly but the SE of S did not because Miranda and Bettoli used a rounded estimate of S in the calculation of the SE of S but chapmanRobson does not. #' -#' Tested the results against the results from \code{agesurv} in \pkg{fishmethods} using the \code{rockbass} data.frame in \pkg{fishmethods}. 
Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith et al. (2012)) results. \pkg{FSA} uses equation 2 from Smith et al. (2012) whereas \pkg{fishmethods} appears to use equation 5 from the same source to estimate the SE of Z. +#' Tested the results against the results from \code{agesurv} in \pkg{fishmethods} using the \code{rockbass} data.frame in \pkg{fishmethods}. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith \emph{et al.} (2012)) results. \pkg{FSA} uses equation 2 from Smith \emph{et al.} (2012) whereas \pkg{fishmethods} appears to use equation 5 from the same source to estimate the SE of Z. #' #' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' diff --git a/R/growthModels.R b/R/growthModels.R index fec07eac..d6bea54f 100644 --- a/R/growthModels.R +++ b/R/growthModels.R @@ -30,7 +30,7 @@ #' \item In the Quinn and Deriso (1999) functions (the \sQuote{QuinnDerisoX} functions), the a parameter here is equal to lambda/K there and the gi parameter here is equal to the K parameter there. Also note that their Y is L here. #' \item In the Ricker (1979)[p. 705] functions (the \sQuote{RickerX} functions), the a parameter here is equal to k there and the gi parameter here is equal to the g parameter there. Also note that their w is L here. In the Ricker (1979) functions as presented in Campana and Jones (1992), the a parameter here is equal to k parameter there and the gi parameter here is equal to the G parameter there. Also note that their X is L here. #' \item The function in Ricker (1975)[p. 232] is the same as \sQuote{Ricker2} where the a parameter here is qual to G there and the gi parameter here is equal to the g parameter there. Also note that their w is L here. -#' \item The function in Quist et al. (2012)[p. 
714] is the same as \sQuote{Ricker1} where the gi parameter here is equal to the G parameter there and the ti parameter here is equal to the t0 parameter there. +#' \item The function in Quist \emph{et al.} (2012)[p. 714] is the same as \sQuote{Ricker1} where the gi parameter here is equal to the G parameter there and the ti parameter here is equal to the t0 parameter there. #' \item The function in Katsanevakis and Maravelias (2008) is the same as \sQuote{Ricker1} where the gi parameter here is equal to k2 parameter there and the ti parameter here is equal to the t2 parameter there. #' } #' \item Richards diff --git a/R/metaM.R b/R/metaM.R index a0683b9c..eafe1849 100644 --- a/R/metaM.R +++ b/R/metaM.R @@ -4,22 +4,22 @@ #' #' @details One of several methods is chosen with \code{method}. The available methods can be seen with \code{Mmethods()} and are listed below with a brief description of where the equation came from. The sources (listed below) should be consulted for more specific information. #' \itemize{ -#' \item \code{method="HoenigNLS"}: The \dQuote{modified Hoenig equation derived with a non-linear model} as described in Then et al. (2015) on the third line of Table 3. This method was the preferred method suggested by Then et al. (2015). Requires only \code{tmax}. -#' \item \code{method="PaulyLNoT"}: The \dQuote{modified Pauly length equation} as described on the sixth line of Table 3 in Then et al. (2015). Then et al. (2015) suggested that this is the preferred model if maximum age (tmax) information was not available. Requires \code{K} and \code{Linf}. +#' \item \code{method="HoenigNLS"}: The \dQuote{modified Hoenig equation derived with a non-linear model} as described in Then \emph{et al.} (2015) on the third line of Table 3. This method was the preferred method suggested by Then \emph{et al.} (2015). Requires only \code{tmax}. 
+#' \item \code{method="PaulyLNoT"}: The \dQuote{modified Pauly length equation} as described on the sixth line of Table 3 in Then \emph{et al.} (2015). Then \emph{et al.} (2015) suggested that this is the preferred model if maximum age (tmax) information was not available. Requires \code{K} and \code{Linf}. #' \item \code{method="PaulyL"}: The \dQuote{Pauly (1980) equation using fish lengths} from his equation 11. This is the most commonly used method in the literature. Note that Pauly used common logarithms as used here but the model is often presented in other sources with natural logarithms. Requires \code{K}, \code{Linf}, and \code{T}. #' \item \code{method="PaulyW"}: The \dQuote{Pauly (1980) equation for weights} from his equation 10. Requires \code{K}, \code{Winf}, and \code{T}. #' \item \code{method="HoeingO"}, \code{method="HoeingOF"}, \code{method="HoeingOM"}, \code{method="HoeingOC"}: The original \dQuote{Hoenig (1983) composite}, \dQuote{fish}, \dQuote{mollusc}, and \dQuote{cetacean} (fit with OLS) equations from the second column on page 899 of Hoenig (1983). Requires only \code{tmax}. #' \item \code{method="HoeingO2"}, \code{method="HoeingO2F"}, \code{method="HoeingO2M"}, \code{method="HoeingO2C"}: The original \dQuote{Hoenig (1983) composite}, \dQuote{fish}, \dQuote{mollusc}, and \dQuote{cetacean} (fit with Geometric Mean Regression) equations from the second column on page 537 of Kenchington (2014). Requires only \code{tmax}. -#' \item \code{method="HoenigLM"}: The \dQuote{modified Hoenig equation derived with a linear model} as described in Then et al. (2015) on the second line of Table 3. Requires only \code{tmax}. -#' \item \code{method="HewittHoenig"}: The \dQuote{Hewitt and Hoenig (2005) equation} from their equation 8. Requires only \code{tmax}. -#' \item \code{method="tmax1"}: The \dQuote{one-parameter tmax equation} from the first line of Table 3 in Then et al. (2015). Requires only \code{tmax}. 
-#' \item \code{method="K1"}: The \dQuote{one-parameter K equation} from the fourth line of Table 3 in Then et al. (2015). Requires only \code{K}. -#' \item \code{method="K2"}: The \dQuote{two-parameter K equation} from the fifth line of Table 3 in Then et al. (2015). Requires only \code{K}. +#' \item \code{method="HoenigLM"}: The \dQuote{modified Hoenig equation derived with a linear model} as described in Then \emph{et al.} (2015) on the second line of Table 3. Requires only \code{tmax}. +#' \item \code{method="HewittHoenig"}: The \dQuote{Hewitt and Hoenig (2005) equation} from their equation 8. Requires only \code{tmax}. +#' \item \code{method="tmax1"}: The \dQuote{one-parameter tmax equation} from the first line of Table 3 in Then \emph{et al.} (2015). Requires only \code{tmax}. +#' \item \code{method="K1"}: The \dQuote{one-parameter K equation} from the fourth line of Table 3 in Then \emph{et al.} (2015). Requires only \code{K}. +#' \item \code{method="K2"}: The \dQuote{two-parameter K equation} from the fifth line of Table 3 in Then \emph{et al.} (2015). Requires only \code{K}. #' \item \code{method="JensenK1"}: The \dQuote{Jensen (1996) one-parameter K equation}. Requires only \code{K}. #' \item \code{method="JensenK2"}: The \dQuote{Jensen (2001) two-parameter K equation} from their equation 8. Requires only \code{K}. -#' \item \code{method="Gislason"}: The \dQuote{Gislason et al. (2010) equation} from their equation 2. Requires \code{K}, \code{Linf}, and \code{L}. +#' \item \code{method="Gislason"}: The \dQuote{Gislason \emph{et al.} (2010) equation} from their equation 2. Requires \code{K}, \code{Linf}, and \code{L}. #' \item \code{method="AlversonCarney"}: The \dQuote{Alverson and Carney (1975) equation} as given in equation 10 of Zhang and Megrey (2006). Requires \code{tmax} and \code{K}. -#' \item \code{method="Charnov"}: The \dQuote{Charnov et al. (2013) equation} as given in the second column of page 545 of Kenchington (2014). 
Requires \code{K}, \code{Linf}, and \code{L}. +#' \item \code{method="Charnov"}: The \dQuote{Charnov \emph{et al.} (2013) equation} as given in the second column of page 545 of Kenchington (2014). Requires \code{K}, \code{Linf}, and \code{L}. #' \item \code{method="ZhangMegreyD"}, \code{method="ZhangMegreyP"}: The \dQuote{Zhang and Megrey (2006) equation} as given in their equation 8 but modified for demersal or pelagic fish. Thus, the user must choose the fish type with \code{group}. Requires \code{tmax}, \code{K}, \code{t0}, \code{t50}, and \code{b}. #' \item \code{method="RikhterEfanov1"}: The \dQuote{Rikhter and Efanov (1976) equation (#2)} as given in the second column of page 541 of Kenchington (2014) and in Table 6.4 of Miranda and Bettoli (2007). Requires only \code{t50}. #' \item \code{method="RikhterEfanov2"}: The \dQuote{Rikhter and Efanov (1976) equation (#1)} as given in the first column of page 541 of Kenchington (2014). Requires \code{t50}, \code{K}, \code{t0}, and \code{b}. diff --git a/R/mrOpen.R b/R/mrOpen.R index 6834033b..960d3799 100644 --- a/R/mrOpen.R +++ b/R/mrOpen.R @@ -8,7 +8,7 @@ #' #' If \code{mb.top} is a matrix then it must be square, must have non-negative and no NA values in the upper triangle, and all NA values on the lower triangle and diagonal. If \code{mb.bot} is a matrix then it must have four rows named \code{m}, \code{u}, \code{n}, and \code{R} (see \code{\link{capHistSum}} for definitions), all values must be non-NA, and the first value of \code{m} must be 0. The last value of \code{R} can either be 0 or some positive number (it is ultimately ignored in all calculations). #' -#' All parameter estimates are performed using equations 4.6-4.9 from Pollock et al (1990) and from page 204 in Seber 2002. If \code{type="Jolly"} then all standard errors (square root of the variances) are from equations 4.11, 4.12, and 4.14 in Pollock et al. (1990) (these are different than those in Seber (2002) ... 
see Pollock et al.'s note on page 21). If \code{type="Jolly"} and \code{phi.full=TRUE} then the full variance for the phi parameter is given as in eqn 4.18 in Pollock et al. (1990), otherwise eqn 4.13 from Pollock et al. (1990) is used. When \code{type="Jolly"} the confidence interval are produced using normal theory (i.e., estimate +/- z*SE). If \code{type="Manly"} then the confidence intervals for N and phi (none will be produced for B) are constructed using the methods of Manly (1984) and as described in 2.24-2.33 of Krebs (1989). No standard errors are returned when \code{type="Manly"}. +#' All parameter estimates are performed using equations 4.6-4.9 from Pollock \emph{et al.} (1990) and from page 204 in Seber 2002. If \code{type="Jolly"} then all standard errors (square root of the variances) are from equations 4.11, 4.12, and 4.14 in Pollock \emph{et al.} (1990) (these are different than those in Seber (2002) ... see Pollock \emph{et al.}'s note on page 21). If \code{type="Jolly"} and \code{phi.full=TRUE} then the full variance for the phi parameter is given as in eqn 4.18 in Pollock \emph{et al.} (1990), otherwise eqn 4.13 from Pollock \emph{et al.} (1990) is used. When \code{type="Jolly"} the confidence interval are produced using normal theory (i.e., estimate +/- z*SE). If \code{type="Manly"} then the confidence intervals for N and phi (none will be produced for B) are constructed using the methods of Manly (1984) and as described in 2.24-2.33 of Krebs (1989). No standard errors are returned when \code{type="Manly"}. #' #' The \code{summary} function returns estimates of M, N, phi, B, and their associated standard errors and, if \code{verbose=TRUE} the intermediate calculations of \dQuote{observables} from the data -- n, m, R, r, and z. #' @@ -33,11 +33,11 @@ #' \item conf.level The provided level of confidence that was used. #' } #' -#' @section Testing: The formulas have been triple-checked against formulas in Pollock et al. 
(1990), Manly (1984), and Seber (2002). +#' @section Testing: The formulas have been triple-checked against formulas in Pollock \emph{et al.} (1990), Manly (1984), and Seber (2002). #' #' The results for the \code{\link{CutthroatAL}} data file (as analyzed in the example) was compared to results from the JOLLY program available at http://www.mbr-pwrc.usgs.gov/software/jolly.html. The r and z values matched, all M and N estimates match at one decimal place, all phi are within 0.001, and all B are within 0.7. The SE match for M except for two estimates that are within 0.1, match for N except for one estimate that is within 0.1, are within 0.001 for phi, and are within 1.3 for B (except for for the first estimate which is dramatically off). #' -#' The results of \code{mrOpen} related to Table 4.4 of Pollock et al. (1990) match (to one decimal place) except for three estimates that are within 0.1\% for N, match (to two decimal places) for phi except for where Pollock set phi>1 to phi=1, match for B except for Pollock set B<0 to B=0. The SE match (to two decimal places) for N except for N15 (which is within 0.5, <5\%), match (to three decimal places) for phi except for phi15 (which is within 0.001, <0.5\%), match (to two decimal places) for B except for B17 and B20 which are within 0.2 (<0.2\%) +#' The results of \code{mrOpen} related to Table 4.4 of Pollock \emph{et al.} (1990) match (to one decimal place) except for three estimates that are within 0.1\% for N, match (to two decimal places) for phi except for where Pollock set phi>1 to phi=1, match for B except for Pollock set B<0 to B=0. 
The SE match (to two decimal places) for N except for N15 (which is within 0.5, <5\%), match (to three decimal places) for phi except for phi15 (which is within 0.001, <0.5\%), match (to two decimal places) for B except for B17 and B20 which are within 0.2 (<0.2\%) #' #' All point estimates of M, N, phi, and B and the SE of phi match the results in Table 2.3 of Krebs (1989) (within minimal rounding error for a very small number of results). The SE of N results are not close to those of Krebs (1989) (who does not provide a formula for SE so the discrepancy cannot be explored). The SE of B results match those of Krebs (1989) for 5 of the 8 values and are within 5\% for 2 of the other 3 values (the last estimate is off by 27\%). #' diff --git a/R/psdCI.R b/R/psdCI.R index 9de22f6f..3eedfbc2 100644 --- a/R/psdCI.R +++ b/R/psdCI.R @@ -2,7 +2,7 @@ #' #' @description Compute confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values as requested by the user. #' -#' @details Computes confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values. Two methods can be used as chosen with \code{method=}. If \code{method="binomial"} then the binomial distribution (via \code{binCI()}) is used. If \code{method="multinomial"} then the multinomial method described by Brenden et al. (2008) is used. This function is defined to compute one confidence interval so \code{method="binomial"} is the default. See examples and \code{\link{psdCalc}} for computing several simultaneous confidence intervals. +#' @details Computes confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values. Two methods can be used as chosen with \code{method=}. If \code{method="binomial"} then the binomial distribution (via \code{binCI()}) is used. If \code{method="multinomial"} then the multinomial method described by Brenden \emph{et al.} (2008) is used. This function is defined to compute one confidence interval so \code{method="binomial"} is the default. 
See examples and \code{\link{psdCalc}} for computing several simultaneous confidence intervals. #' #' A table of proportions within each length category is given in \code{ptbl}. If \code{ptbl} has any values greater than 1 then it is assumed that a table of percentages was supplied and the entire table will be divided by 100 to continue. The proportions must sum to 1 (with some allowance for rounding). #' @@ -19,7 +19,7 @@ #' #' @return A matrix with columns that contain the computed PSD-X or PSD X-Y value and the associated confidence interval. The confidence interval values were set to zero or 100 if the computed value was negative or greater than 100, respectively. #' -#' @section Testing: The multinomial results match the results given in Brendent et al. (2008). +#' @section Testing: The multinomial results match the results given in Brenden \emph{et al.} (2008). #' #' @author Derek H. Ogle, \email{DerekOgle51@gmail.com} #' diff --git a/man/WSlit.Rd b/man/WSlit.Rd index b74cb6aa..2d898e50 100644 --- a/man/WSlit.Rd +++ b/man/WSlit.Rd @@ -7,25 +7,23 @@ \format{ A data frame with observations on the following 13 variables: \describe{ - \item{species}{Species name.} + \item{species}{Species name. Use \code{wsVal()} to see the list of available species.} \item{units}{Units of measurements. \code{Metric} uses lengths in mm and weight in grams. 
\code{English} uses lengths in inches and weight in pounds.} \item{type}{Type of equation (\code{linear} or \code{quadratic}).} \item{ref}{Reference quartile (\code{75}, \code{50}, or \code{25}).} \item{measure}{The type of length measurement used -- total length (\code{TL}) or fork length (\code{FL}).} - \item{method}{The type of method used to derive the equation (\code{RLP},\code{EmP}, or \code{Other}).} + \item{method}{The type of method used to derive the equation (Regression Line Percentile (\code{RLP}; see Murphy \emph{et al.} (1990) and Murphy \emph{et al.} (1991)), Empirical Percentile (\code{EmP}; see Gerow \emph{et al.} (2005)), or \code{Other}).} \item{min.len}{Minimum total length (mm or in, depending on \code{units}) for which the equation should be applied.} \item{max.len}{Maximum total length (mm or in, depending on \code{units}) for which the equation should be applied.} \item{int}{The intercept for the model.} - \item{slope}{The slope for the linear models or the linear coefficient for the quadratic equation.} - \item{quad}{The quadratic coefficient in the quadratic equations.} - \item{source}{Source of the equation. These match the sources given in Neumann et al. 2012.} + \item{slope}{The slope for the linear equation or the linear coefficient for the quadratic equation.} + \item{quad}{The quadratic coefficient in the quadratic equation.} + \item{source}{Source of the equation. These match the sources given in Neumann \emph{et al.} (2012).} \item{comment}{Comments about use of equation.} } } \source{ -Most of these equations can be found in Neumann, R.M., C.S. Guy, and D.W. Willis. 2012. Length, Weight, and Associated Indices. Chapter 14 in Zale, A.V., D.L. Parrish, and T.M. Sutton, editors. Fisheries Techniques. American Fisheries Society, Bethesda, MD. - -Some species were not in Neumann et al (2012) and are noted as such in the \code{comments} variable. +Most of these equations can be found in Neumann \emph{et al.} (2012). 
Species not in Neumann \emph{et al.} (2012) are noted as such in the \code{comments} variable. } \description{ Parameters for all known standard weight equations. @@ -53,6 +51,14 @@ head(WSlit) } \references{ Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}. Chapman & Hall/CRC, Boca Raton, FL. + +Gerow, K.G., R.C. Anderson-Sprecher, and W.A. Hubert. 2005. A new method to compute standard weight equations that reduces length-related bias. North American Journal of Fisheries Management 25:1288–1300. + +Murphy, B.R., M.L. Brown, and T.A. Springer. 1990. Evaluation of the relative weight (Wr) index, with new applications to walleye. North American Journal of Fisheries Management 10:85–97. + +Murphy, B. R., D. W. Willis, and T. A. Springer. 1991. The relative weight index in fisheries management: Status and needs. Fisheries (Bethesda) 16(2):30–38. + +Neumann, R.M., C.S. Guy, and D.W. Willis. 2012. Length, Weight, and Associated Indices. Chapter 14 in Zale, A.V., D.L. Parrish, and T.M. Sutton, editors. Fisheries Techniques. American Fisheries Society, Bethesda, MD. } \seealso{ See \code{\link{wsVal}} and \code{\link{wrAdd}} for related functionality. diff --git a/man/chapmanRobson.Rd b/man/chapmanRobson.Rd index e71a760f..79990163 100644 --- a/man/chapmanRobson.Rd +++ b/man/chapmanRobson.Rd @@ -111,12 +111,12 @@ Computes the Chapman-Robson estimates of annual survival rate (S) and instantane \details{ The default is to use all ages in the age vector. This is only appropriate if the age and catch vectors contain only the ages and catches on the descending limb of the catch curve. Use \code{ages2use} to isolate only the catch and ages on the descending limb. -The Chapman-Robson method provides an estimate of the annual survival rate, with the annual mortality rate (A) determined by 1-S. The instantaneous mortality rate is often computed as -log(S). 
However, Hoenig et al. (1983) showed that this produced a biased (over)estimate of Z and provided a correction. The correction is applied by setting \code{zmethod="Hoenigetal"}. Smith et al. (2012) showed that the Hoenig et al. method should be corrected for a variance inflation factor. This correction is applied by setting \code{zmethod="Smithetal"} (which is the default behavior). Choose \code{zmethod="original"} to use the original estimates for Z and it's SE as provided by Chapman and Robson. +The Chapman-Robson method provides an estimate of the annual survival rate, with the annual mortality rate (A) determined by 1-S. The instantaneous mortality rate is often computed as -log(S). However, Hoenig \emph{et al.} (1983) showed that this produced a biased (over)estimate of Z and provided a correction. The correction is applied by setting \code{zmethod="Hoenigetal"}. Smith \emph{et al.} (2012) showed that the Hoenig \emph{et al.} method should be corrected for a variance inflation factor. This correction is applied by setting \code{zmethod="Smithetal"} (which is the default behavior). Choose \code{zmethod="original"} to use the original estimates for Z and it's SE as provided by Chapman and Robson. } \section{Testing}{ Tested the results of chapmanRobson against the results in Miranda and Bettoli (2007). The point estimates of S matched perfectly but the SE of S did not because Miranda and Bettoli used a rounded estimate of S in the calculation of the SE of S but chapmanRobson does not. -Tested the results against the results from \code{agesurv} in \pkg{fishmethods} using the \code{rockbass} data.frame in \pkg{fishmethods}. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith et al. (2012)) results. \pkg{FSA} uses equation 2 from Smith et al. (2012) whereas \pkg{fishmethods} appears to use equation 5 from the same source to estimate the SE of Z. 
+Tested the results against the results from \code{agesurv} in \pkg{fishmethods} using the \code{rockbass} data.frame in \pkg{fishmethods}. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith \emph{et al.} (2012)) results. \pkg{FSA} uses equation 2 from Smith \emph{et al.} (2012) whereas \pkg{fishmethods} appears to use equation 5 from the same source to estimate the SE of Z. } \section{IFAR Chapter}{ diff --git a/man/growthModels.Rd b/man/growthModels.Rd index 03f82d0c..bb6a9f99 100644 --- a/man/growthModels.Rd +++ b/man/growthModels.Rd @@ -81,7 +81,7 @@ Take note of the following for parameterizations (i.e., \code{param}) of each gr \item In the Quinn and Deriso (1999) functions (the \sQuote{QuinnDerisoX} functions), the a parameter here is equal to lambda/K there and the gi parameter here is equal to the K parameter there. Also note that their Y is L here. \item In the Ricker (1979)[p. 705] functions (the \sQuote{RickerX} functions), the a parameter here is equal to k there and the gi parameter here is equal to the g parameter there. Also note that their w is L here. In the Ricker (1979) functions as presented in Campana and Jones (1992), the a parameter here is equal to k parameter there and the gi parameter here is equal to the G parameter there. Also note that their X is L here. \item The function in Ricker (1975)[p. 232] is the same as \sQuote{Ricker2} where the a parameter here is qual to G there and the gi parameter here is equal to the g parameter there. Also note that their w is L here. - \item The function in Quist et al. (2012)[p. 714] is the same as \sQuote{Ricker1} where the gi parameter here is equal to the G parameter there and the ti parameter here is equal to the t0 parameter there. + \item The function in Quist \emph{et al.} (2012)[p. 
714] is the same as \sQuote{Ricker1} where the gi parameter here is equal to the G parameter there and the ti parameter here is equal to the t0 parameter there. \item The function in Katsanevakis and Maravelias (2008) is the same as \sQuote{Ricker1} where the gi parameter here is equal to k2 parameter there and the ti parameter here is equal to the t2 parameter there. } \item Richards diff --git a/man/metaM.Rd b/man/metaM.Rd index 6526c838..3997b43b 100644 --- a/man/metaM.Rd +++ b/man/metaM.Rd @@ -72,22 +72,22 @@ Several methods can be used to estimated natural mortality (M) from other types \details{ One of several methods is chosen with \code{method}. The available methods can be seen with \code{Mmethods()} and are listed below with a brief description of where the equation came from. The sources (listed below) should be consulted for more specific information. \itemize{ - \item \code{method="HoenigNLS"}: The \dQuote{modified Hoenig equation derived with a non-linear model} as described in Then et al. (2015) on the third line of Table 3. This method was the preferred method suggested by Then et al. (2015). Requires only \code{tmax}. - \item \code{method="PaulyLNoT"}: The \dQuote{modified Pauly length equation} as described on the sixth line of Table 3 in Then et al. (2015). Then et al. (2015) suggested that this is the preferred model if maximum age (tmax) information was not available. Requires \code{K} and \code{Linf}. + \item \code{method="HoenigNLS"}: The \dQuote{modified Hoenig equation derived with a non-linear model} as described in Then \emph{et al.} (2015) on the third line of Table 3. This method was the preferred method suggested by Then \emph{et al.} (2015). Requires only \code{tmax}. + \item \code{method="PaulyLNoT"}: The \dQuote{modified Pauly length equation} as described on the sixth line of Table 3 in Then \emph{et al.} (2015). 
Then \emph{et al.} (2015) suggested that this is the preferred model if maximum age (tmax) information was not available. Requires \code{K} and \code{Linf}. \item \code{method="PaulyL"}: The \dQuote{Pauly (1980) equation using fish lengths} from his equation 11. This is the most commonly used method in the literature. Note that Pauly used common logarithms as used here but the model is often presented in other sources with natural logarithms. Requires \code{K}, \code{Linf}, and \code{T}. \item \code{method="PaulyW"}: The \dQuote{Pauly (1980) equation for weights} from his equation 10. Requires \code{K}, \code{Winf}, and \code{T}. \item \code{method="HoeingO"}, \code{method="HoeingOF"}, \code{method="HoeingOM"}, \code{method="HoeingOC"}: The original \dQuote{Hoenig (1983) composite}, \dQuote{fish}, \dQuote{mollusc}, and \dQuote{cetacean} (fit with OLS) equations from the second column on page 899 of Hoenig (1983). Requires only \code{tmax}. \item \code{method="HoeingO2"}, \code{method="HoeingO2F"}, \code{method="HoeingO2M"}, \code{method="HoeingO2C"}: The original \dQuote{Hoenig (1983) composite}, \dQuote{fish}, \dQuote{mollusc}, and \dQuote{cetacean} (fit with Geometric Mean Regression) equations from the second column on page 537 of Kenchington (2014). Requires only \code{tmax}. - \item \code{method="HoenigLM"}: The \dQuote{modified Hoenig equation derived with a linear model} as described in Then et al. (2015) on the second line of Table 3. Requires only \code{tmax}. - \item \code{method="HewittHoenig"}: The \dQuote{Hewitt and Hoenig (2005) equation} from their equation 8. Requires only \code{tmax}. - \item \code{method="tmax1"}: The \dQuote{one-parameter tmax equation} from the first line of Table 3 in Then et al. (2015). Requires only \code{tmax}. - \item \code{method="K1"}: The \dQuote{one-parameter K equation} from the fourth line of Table 3 in Then et al. (2015). Requires only \code{K}. 
- \item \code{method="K2"}: The \dQuote{two-parameter K equation} from the fifth line of Table 3 in Then et al. (2015). Requires only \code{K}. + \item \code{method="HoenigLM"}: The \dQuote{modified Hoenig equation derived with a linear model} as described in Then \emph{et al.} (2015) on the second line of Table 3. Requires only \code{tmax}. + \item \code{method="HewittHoenig"}: The \dQuote{Hewitt and Hoenig (2005) equation} from their equation 8. Requires only \code{tmax}. + \item \code{method="tmax1"}: The \dQuote{one-parameter tmax equation} from the first line of Table 3 in Then \emph{et al.} (2015). Requires only \code{tmax}. + \item \code{method="K1"}: The \dQuote{one-parameter K equation} from the fourth line of Table 3 in Then \emph{et al.} (2015). Requires only \code{K}. + \item \code{method="K2"}: The \dQuote{two-parameter K equation} from the fifth line of Table 3 in Then \emph{et al.} (2015). Requires only \code{K}. \item \code{method="JensenK1"}: The \dQuote{Jensen (1996) one-parameter K equation}. Requires only \code{K}. \item \code{method="JensenK2"}: The \dQuote{Jensen (2001) two-parameter K equation} from their equation 8. Requires only \code{K}. - \item \code{method="Gislason"}: The \dQuote{Gislason et al. (2010) equation} from their equation 2. Requires \code{K}, \code{Linf}, and \code{L}. + \item \code{method="Gislason"}: The \dQuote{Gislason \emph{et al.} (2010) equation} from their equation 2. Requires \code{K}, \code{Linf}, and \code{L}. \item \code{method="AlversonCarney"}: The \dQuote{Alverson and Carney (1975) equation} as given in equation 10 of Zhang and Megrey (2006). Requires \code{tmax} and \code{K}. - \item \code{method="Charnov"}: The \dQuote{Charnov et al. (2013) equation} as given in the second column of page 545 of Kenchington (2014). Requires \code{K}, \code{Linf}, and \code{L}. + \item \code{method="Charnov"}: The \dQuote{Charnov \emph{et al.} (2013) equation} as given in the second column of page 545 of Kenchington (2014). 
Requires \code{K}, \code{Linf}, and \code{L}. \item \code{method="ZhangMegreyD"}, \code{method="ZhangMegreyP"}: The \dQuote{Zhang and Megrey (2006) equation} as given in their equation 8 but modified for demersal or pelagic fish. Thus, the user must choose the fish type with \code{group}. Requires \code{tmax}, \code{K}, \code{t0}, \code{t50}, and \code{b}. \item \code{method="RikhterEfanov1"}: The \dQuote{Rikhter and Efanov (1976) equation (#2)} as given in the second column of page 541 of Kenchington (2014) and in Table 6.4 of Miranda and Bettoli (2007). Requires only \code{t50}. \item \code{method="RikhterEfanov2"}: The \dQuote{Rikhter and Efanov (1976) equation (#1)} as given in the first column of page 541 of Kenchington (2014). Requires \code{t50}, \code{K}, \code{t0}, and \code{b}. diff --git a/man/mrOpen.Rd b/man/mrOpen.Rd index 7dfe13eb..e2fdcbae 100644 --- a/man/mrOpen.Rd +++ b/man/mrOpen.Rd @@ -68,18 +68,18 @@ If \code{mb.top} contains an object from the \code{\link{capHistSum}} function t If \code{mb.top} is a matrix then it must be square, must have non-negative and no NA values in the upper triangle, and all NA values on the lower triangle and diagonal. If \code{mb.bot} is a matrix then it must have four rows named \code{m}, \code{u}, \code{n}, and \code{R} (see \code{\link{capHistSum}} for definitions), all values must be non-NA, and the first value of \code{m} must be 0. The last value of \code{R} can either be 0 or some positive number (it is ultimately ignored in all calculations). -All parameter estimates are performed using equations 4.6-4.9 from Pollock et al (1990) and from page 204 in Seber 2002. If \code{type="Jolly"} then all standard errors (square root of the variances) are from equations 4.11, 4.12, and 4.14 in Pollock et al. (1990) (these are different than those in Seber (2002) ... see Pollock et al.'s note on page 21). 
If \code{type="Jolly"} and \code{phi.full=TRUE} then the full variance for the phi parameter is given as in eqn 4.18 in Pollock et al. (1990), otherwise eqn 4.13 from Pollock et al. (1990) is used. When \code{type="Jolly"} the confidence interval are produced using normal theory (i.e., estimate +/- z*SE). If \code{type="Manly"} then the confidence intervals for N and phi (none will be produced for B) are constructed using the methods of Manly (1984) and as described in 2.24-2.33 of Krebs (1989). No standard errors are returned when \code{type="Manly"}. +All parameter estimates are performed using equations 4.6-4.9 from Pollock \emph{et al.} (1990) and from page 204 in Seber 2002. If \code{type="Jolly"} then all standard errors (square root of the variances) are from equations 4.11, 4.12, and 4.14 in Pollock \emph{et al.} (1990) (these are different than those in Seber (2002) ... see Pollock \emph{et al.}'s note on page 21). If \code{type="Jolly"} and \code{phi.full=TRUE} then the full variance for the phi parameter is given as in eqn 4.18 in Pollock \emph{et al.} (1990), otherwise eqn 4.13 from Pollock \emph{et al.} (1990) is used. When \code{type="Jolly"} the confidence interval are produced using normal theory (i.e., estimate +/- z*SE). If \code{type="Manly"} then the confidence intervals for N and phi (none will be produced for B) are constructed using the methods of Manly (1984) and as described in 2.24-2.33 of Krebs (1989). No standard errors are returned when \code{type="Manly"}. The \code{summary} function returns estimates of M, N, phi, B, and their associated standard errors and, if \code{verbose=TRUE} the intermediate calculations of \dQuote{observables} from the data -- n, m, R, r, and z. The level of confidence is not set in the \code{confint} function, in contrast to most \code{confint} functions. Rather the confidence level is set in the main \code{mrOpen} function. 
} \section{Testing}{ - The formulas have been triple-checked against formulas in Pollock et al. (1990), Manly (1984), and Seber (2002). + The formulas have been triple-checked against formulas in Pollock \emph{et al.} (1990), Manly (1984), and Seber (2002). The results for the \code{\link{CutthroatAL}} data file (as analyzed in the example) was compared to results from the JOLLY program available at http://www.mbr-pwrc.usgs.gov/software/jolly.html. The r and z values matched, all M and N estimates match at one decimal place, all phi are within 0.001, and all B are within 0.7. The SE match for M except for two estimates that are within 0.1, match for N except for one estimate that is within 0.1, are within 0.001 for phi, and are within 1.3 for B (except for for the first estimate which is dramatically off). -The results of \code{mrOpen} related to Table 4.4 of Pollock et al. (1990) match (to one decimal place) except for three estimates that are within 0.1\% for N, match (to two decimal places) for phi except for where Pollock set phi>1 to phi=1, match for B except for Pollock set B<0 to B=0. The SE match (to two decimal places) for N except for N15 (which is within 0.5, <5\%), match (to three decimal places) for phi except for phi15 (which is within 0.001, <0.5\%), match (to two decimal places) for B except for B17 and B20 which are within 0.2 (<0.2\%) +The results of \code{mrOpen} related to Table 4.4 of Pollock \emph{et al.} (1990) match (to one decimal place) except for three estimates that are within 0.1\% for N, match (to two decimal places) for phi except for where Pollock set phi>1 to phi=1, match for B except for Pollock set B<0 to B=0. 
The SE match (to two decimal places) for N except for N15 (which is within 0.5, <5\%), match (to three decimal places) for phi except for phi15 (which is within 0.001, <0.5\%), match (to two decimal places) for B except for B17 and B20 which are within 0.2 (<0.2\%) All point estimates of M, N, phi, and B and the SE of phi match the results in Table 2.3 of Krebs (1989) (within minimal rounding error for a very small number of results). The SE of N results are not close to those of Krebs (1989) (who does not provide a formula for SE so the discrepancy cannot be explored). The SE of B results match those of Krebs (1989) for 5 of the 8 values and are within 5\% for 2 of the other 3 values (the last estimate is off by 27\%). diff --git a/man/psdCI.Rd b/man/psdCI.Rd index 15bfebd7..147e86a7 100644 --- a/man/psdCI.Rd +++ b/man/psdCI.Rd @@ -39,14 +39,14 @@ A matrix with columns that contain the computed PSD-X or PSD X-Y value and the a Compute confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values as requested by the user. } \details{ -Computes confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values. Two methods can be used as chosen with \code{method=}. If \code{method="binomial"} then the binomial distribution (via \code{binCI()}) is used. If \code{method="multinomial"} then the multinomial method described by Brenden et al. (2008) is used. This function is defined to compute one confidence interval so \code{method="binomial"} is the default. See examples and \code{\link{psdCalc}} for computing several simultaneous confidence intervals. +Computes confidence intervals for (traditional) PSD-X and (incremental) PSD X-Y values. Two methods can be used as chosen with \code{method=}. If \code{method="binomial"} then the binomial distribution (via \code{binCI()}) is used. If \code{method="multinomial"} then the multinomial method described by Brenden \emph{et al.} (2008) is used. 
This function is defined to compute one confidence interval so \code{method="binomial"} is the default. See examples and \code{\link{psdCalc}} for computing several simultaneous confidence intervals. A table of proportions within each length category is given in \code{ptbl}. If \code{ptbl} has any values greater than 1 then it is assumed that a table of percentages was supplied and the entire table will be divided by 100 to continue. The proportions must sum to 1 (with some allowance for rounding). A vector of length equal to the length of \code{ptbl} is given in \code{indvec} which contains zeros and ones to identify the linear combination of values in \code{ptbl} to use to construct the confidence intervals. For example, if \code{ptbl} has four proportions then \code{indvec=c(1,0,0,0)} would be used to construct a confidence interval for the population proportion in the first category. Alternatively, \code{indvec=c(0,0,1,1)} would be used to construct a confidence interval for the population proportion in the last two categories. This vector must not contain all zeros or all ones. } \section{Testing}{ - The multinomial results match the results given in Brendent et al. (2008). + The multinomial results match the results given in Brenden \emph{et al.} (2008). 
} \section{IFAR Chapter}{ From 7f35c04f482cbda9e2e67d715a36e2bb93b13c78 Mon Sep 17 00:00:00 2001 From: Derek Ogle Date: Tue, 20 Dec 2022 17:54:19 -0600 Subject: [PATCH 3/8] Updated wsVal() to address #89 --- R/wsVal.R | 60 ++++++++++++++++++---------------- tests/testthat/testthat_WSWR.R | 6 ++-- 2 files changed, 34 insertions(+), 32 deletions(-) diff --git a/R/wsVal.R b/R/wsVal.R index f8f60cb7..d022ba18 100644 --- a/R/wsVal.R +++ b/R/wsVal.R @@ -42,39 +42,41 @@ wsVal <- function(species="List",units=c("metric","English"),ref=75,simplify=FALSE) { type <- measure <- method <- NULL # avoiding bindings warning in RCMD CHECK units <- match.arg(units) - # load WSlit data frame into this functions environment + ## load WSlit data frame into this functions environment WSlit <- FSA::WSlit - # isolate only those data for which those units and ref exist - df <- droplevels(WSlit[WSlit$units==units & WSlit$ref==ref,]) - # check to make sure that that species exists for that subset - OK <- iwsLitCheck(df,species <- capFirst(species)) - # continue if species name is correct - if (OK) { - ## get the appropriate row from the data.frame - WSvec <- df[df$species==species,] + ## Make checks on species (if species exists then reduce dataframe to that species) + if (length(species)>1) STOP("'species' must contain only one name.") + if (species=="List") iListSpecies(WSlit) + else { + if (!any(unique(WSlit$species)==species)) { + STOP("There is no Ws equation in 'WSlit' for ",species, + ".\n Type 'wsVal()' to see a list of available species.\n\n") + } else df <- droplevels(WSlit[WSlit$species==species,]) + ## Make checks on units (if OK reduce data frame to those units) + if (!any(unique(df$units)==units)) { + print(df) + STOP("There is no Ws equation in ",units," units for ",species, + ".\n Please see relevant portion of `WSlit` above.\n\n") + } else df <- droplevels(df[df$units==units,]) + ## Make checks on ref (if OK reduce data frame to that ref) + if (!any(unique(df$ref)==ref)) { + 
print(df) + STOP("There is no Ws equation with ref of ",ref," for ",species, + ".\n Please see relevant portion of `WSlit` above.\n\n") + } else df <- droplevels(df[df$ref==ref,]) + ## Should be a single row data frame if it gets to this point ## If comments says "none" then drop the comment variable - if (WSvec$comment=="none") WSvec <- WSvec[,-which(names(WSvec)=="comment")] + if (df$comment=="none") df <- df[,-which(names(df)=="comment")] ## If function is linear (as opposed to quadratic) then drop the quad variable - if (WSvec$type=="linear") WSvec <- WSvec[,-which(names(WSvec)=="quad")] + if (df$type=="linear") df <- df[,-which(names(df)=="quad")] ## Change "min.len" and "max.len" variables to ".TL" or ."FL" as appropriate - tmp <- paste(c("min","max"),WSvec$measure,sep=".") - names(WSvec)[which(names(WSvec) %in% c("min.len","max.len"))] <- tmp + tmp <- paste(c("min","max"),df$measure,sep=".") + names(df)[which(names(df) %in% c("min.len","max.len"))] <- tmp ## Remove max.len if it is NA - if (is.na(WSvec[,tmp[2]])) WSvec <- WSvec[,-which(names(WSvec)==tmp[2])] - ## If told to simplify then only get sertain values - if (simplify) WSvec <- WSvec[,which(names(WSvec) %in% c("species",tmp,"int","slope","quad"))] - WSvec - } -} - -iwsLitCheck <- function(data,species) { - OK <- FALSE - if (length(species)>1) STOP("'species' must contain only one name.") - if (species=="List") iListSpecies(data) - else if (!any(unique(data$species)==species)) { - STOP("A Ws equation may not exist given your choices of species, units, and ref.\n Please look carefully inside the data(WSlit) data frame.\n\n") + if (is.na(df[,tmp[2]])) df <- df[,-which(names(df)==tmp[2])] + ## If told to simplify then only get certain values + if (simplify) + df <- df[,which(names(df) %in% c("species",tmp,"int","slope","quad"))] + df } - else OK <- TRUE - OK } - diff --git a/tests/testthat/testthat_WSWR.R b/tests/testthat/testthat_WSWR.R index 24082c22..8ec1202b 100644 --- 
a/tests/testthat/testthat_WSWR.R +++ b/tests/testthat/testthat_WSWR.R @@ -2,7 +2,7 @@ test_that("wsVal() messages",{ ## bad species name expect_error(wsVal("Derek"), - "A Ws equation may not exist given your choices") + "There is no Ws equation in 'WSlit' for Derek") ## too many species name expect_error(wsVal(c("Bluegill","Yellow Perch")), "must contain only one name") @@ -12,10 +12,10 @@ test_that("wsVal() messages",{ "should be one of") # don't exist for the species expect_error(wsVal("Ruffe",units="English"), - "A Ws equation may not exist given your choices") + "There is no Ws equation in English units") ## reference value does not exist expect_error(wsVal("Bluegill",ref=50), - "A Ws equation may not exist given your choices") + "There is no Ws equation with ref of 50 for Bluegill") }) test_that("wrAdd() messages",{ From ba8b48f6307d51526a65a06ac340277a454ca71b Mon Sep 17 00:00:00 2001 From: Derek Ogle Date: Wed, 18 Jan 2023 17:09:21 -0600 Subject: [PATCH 4/8] Updated WSlit and PSDlit with Redbreast and Spotted Sunfish --- DESCRIPTION | 4 ++-- NEWS.md | 3 ++- data-raw/PSDlit.csv | 8 +++++--- data-raw/WSlit.csv | 8 ++++++-- data/PSDlit.rdata | Bin 2240 -> 2307 bytes data/WSlit.rdata | Bin 5185 -> 5304 bytes inst/helpers/installTester.R | 37 ----------------------------------- 7 files changed, 15 insertions(+), 45 deletions(-) delete mode 100644 inst/helpers/installTester.R diff --git a/DESCRIPTION b/DESCRIPTION index c6ab2510..84ba5afd 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: FSA Version: 0.9.3.9000 -Date: 2022-2-15 +Date: 2023-1-15 Title: Simple Fisheries Stock Assessment Methods Description: A variety of simple fish stock assessment methods. Authors@R: c( @@ -50,4 +50,4 @@ Suggests: tibble, covr Encoding: UTF-8 -RoxygenNote: 7.2.2 +RoxygenNote: 7.2.3 diff --git a/NEWS.md b/NEWS.md index 2dcbe6b8..109a35e3 100644 --- a/NEWS.md +++ b/NEWS.md @@ -13,7 +13,8 @@ * `alkIndivAge()`: Modified. Added a catch for `NA`s in the length sample. 
Also added a test. This addresses [#88](https://github.com/fishR-Core-Team/FSA/issues/88). * `confint.boot()`: Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by `grep()`ing for the `%` sign. This fixes an issue related to `car::Confint()` returning the `coef()` results for functions that have a `coef()` method but not for those that do not. Also updated tests to use results from `car::Boot()` rather than the old `car::bootCase()`. -* `wSlit`: Modified documentation. Described the `RLP` and `EmP` acronyms and provided references for them. This addresses [#95](https://github.com/fishR-Core-Team/FSA/issues/95)). +* `PSDlit`: Added info for Redbreast Sunfish and Spotted Sunfish from Bonvechio *et al.* (2023). This addresses [#100](https://github.com/fishR-Core-Team/FSA/issues/100). +* `wSlit`: Modified documentation. Described the `RLP` and `EmP` acronyms and provided references for them. This addresses [#95](https://github.com/fishR-Core-Team/FSA/issues/95). Added info for Redbreast Sunfish and Spotted Sunfish from Bonvechio *et al.* (2023). This addresses [#100](https://github.com/fishR-Core-Team/FSA/issues/100). # FSA 0.9.3 * Moved `dplyr` from `imports` to `suggests` (needed because functions were removed in last version; however it is still used in some examples; partially addresses [#87](https://github.com/fishR-Core-Team/FSA/issues/87)).
diff --git a/data-raw/PSDlit.csv b/data-raw/PSDlit.csv index acc6e66d..a8bcae0f 100644 --- a/data-raw/PSDlit.csv +++ b/data-raw/PSDlit.csv @@ -7,9 +7,9 @@ Black Carp,15.75,28.25,35.5,46.5,58.25,40,72,90,118,148,Phelps and Willis (2013) Black Crappie,5,8,10,12,15,13,20,25,30,38,Gabelhouse (1984a) Blue Catfish,12,20,30,35,45,30,51,76,89,114,Gabelhouse (1984a) Bluegill,3,6,8,10,12,8,15,20,25,30,Gabelhouse (1984a) +Brook Trout,8,12,16,20,24,20,30,40,50,60,Hyatt (2000) Brook Trout (lentic),8,13,NA,NA,NA,20,33,NA,NA,NA,Anderson (1980) Brook Trout (lotic),5,8,NA,NA,NA,13,20,NA,NA,NA,Anderson (1980) -Brook Trout,8,12,16,20,24,20,30,40,50,60,Hyatt (2000) Brown Bullhead,5,8,11,14,17,13,20,28,36,43,Bister et al. (2000) Brown Trout (lentic),8,12,16,20,24,20,30,40,50,60,Hyatt and Hubert (2001) Brown Trout (lotic),6,9,12,15,18,15,23,30,38,46,Milewski and Brown (1994) @@ -38,6 +38,7 @@ Palmetto Bass,10,16,20,24,28,25,41,51,61,71,Dumont and Neely (2011) Palmetto Bass (original),8,12,15,20,25,20,30,38,51,63,Gabelhouse (1984a) Pumpkinseed,3,6,8,10,12,8,15,20,25,30,Gabelhouse (1984a) Rainbow Trout,10,16,20,26,31,25,40,50,65,80,Simpkins and Hubert (1996) +Redbreast Sunfish,3,5,7,8,10,8,13,18,20,25,Bonvechio et al. (2023) Redear Sunfish,4,7,9,11,13,10,18,23,28,33,Gabelhouse (1984a) River Carpsucker,7,11,14,18,22,18,28,36,46,56,Bister et al. (2000) Rock Bass,4,7,9,11,13,10,18,23,28,33,Gabelhouse (1984a) @@ -52,8 +53,9 @@ Smallmouth Buffalo ,11,18,24,30,37,28,46,61,76,94,Bister et al. (2000) Splake,8,10,14,16,22,20,25,35,40,55,Hyatt (2000) Spotted Bass,7,11,14,17,20,18,28,35,43,51,Gabelhouse (1984a) Spotted Gar,12,19,25,31,39,30,48,64,79,99,Bister et al. (2000) -Striped Bass (landlocked),12,20,30,35,45,30,51,76,89,114,Gabelhouse (1984a) +Spotted Sunfish,2,4,5,6,7,5,10,13,15,18,Bonvechio et al. 
(2023) Striped Bass (hybrid),10,16,20,24,28,25,41,51,61,71,Dumont and Neely (2011) +Striped Bass (landlocked),12,20,30,35,45,30,51,76,89,114,Gabelhouse (1984a) Striped Bass X White Bass,10,16,20,24,28,25,41,51,61,71,Dumont and Neely (2011) Suwannee Bass,6,9.75,11.75,13.75,15.75,15,25,30,35,40,Bonvechio et al. (2010) Utah Chub,4,8,10,12,15,10,20,25,30,38,Black et al. (2021) @@ -64,6 +66,6 @@ White Catfish,8,13,17,21,26,20,33,43,53,66,Bister et al. (2000) White Crappie,5,8,10,12,15,13,20,25,30,38,Gabelhouse (1984a) White Perch,5,8,10,12,35,13,20,25,30,38,Gabelhouse (1984a) White Sucker,6,10,13,16,20,15,25,33,41,51,Bister et al. (2000) -Yellow Perch,5,8,10,12,15,13,20,25,30,38,Gabelhouse (1984a) Yellow Bass,4,7,9,11,13,10,18,23,28,33,Anderson and Gutreuter (1983) Yellow Bullhead,4,7,9,11,14,10,18,23,28,36,Anderson (1980) +Yellow Perch,5,8,10,12,15,13,20,25,30,38,Gabelhouse (1984a) diff --git a/data-raw/WSlit.csv b/data-raw/WSlit.csv index 7a38e25a..856b5808 100644 --- a/data-raw/WSlit.csv +++ b/data-raw/WSlit.csv @@ -127,6 +127,8 @@ Rainbow Trout (lotic),English,linear,75,TL,RLP,4.75,NA,-3.432,3.024,NA,Simpkins Rainbow Trout (lotic),metric,linear,75,TL,RLP,120,NA,-5.023,3.024,NA,Simpkins and Hubert (1996),none Razorback Sucker,English,linear,75,TL,RLP,4.25,NA,-3.35,2.985,NA,Didenko et al. (2004),none Razorback Sucker,metric,linear,75,TL,RLP,110,NA,-4.886,2.985,NA,Didenko et al. (2004),none +Redbreast Sunfish,English,linear,75,TL,EmP,3,11.75,-3.2727,3.1215,NA,Bonvechio et al. (2023),also used RLP and EmP-quadratic but did not recommend +Redbreast Sunfish,metric,linear,75,TL,EmP,80,300,-5.0018,3.1218,NA,Bonvechio et al. (2023),also used RLP and EmP-quadratic but did not recommend Redear Sunfish,English,linear,75,TL,RLP,2.75,NA,-3.263,3.119,NA,Pope et al. (1995),none Redear Sunfish,metric,linear,75,TL,RLP,70,NA,-4.968,3.119,NA,Pope et al. (1995),none Riffle Dace,metric,quadratic,75,TL,EmP,60,NA,-3.706,1.685,0.349,Giannetto et al. 
(2011),only from Tiber River basin (Italy) @@ -148,8 +150,8 @@ Sauger,English,linear,75,TL,RLP,2.75,NA,-3.671,3.187,NA,Anderson and Neumann (19 Sauger,metric,linear,75,TL,RLP,70,NA,-5.492,3.187,NA,Anderson and Neumann (1996),none Saugeye,English,linear,75,TL,RLP,6.75,NA,-3.76,3.266,NA,Flammang et al. (1993),none Saugeye,metric,linear,75,TL,RLP,170,NA,-5.692,3.266,NA,Flammang et al. (1993),none -Shoal Bass,metric,quadratic,75,TL,EmP,120,NA,-3.052,1.225,0.415,Bonvechio et al. (2019),also used RLP but did not recommend Shoal Bass,English,quadratic,75,TL,EmP,4.75,NA,-3.169,2.391,0.415,Bonvechio et al. (2019),also used RLP but did not recommend +Shoal Bass,metric,quadratic,75,TL,EmP,120,NA,-3.052,1.225,0.415,Bonvechio et al. (2019),also used RLP but did not recommend Shorthead Redhorse,English,linear,75,TL,RLP,4,NA,-3.337,2.962,NA,Bister et al. (2000),none Shorthead Redhorse,metric,linear,75,TL,RLP,100,NA,-4.841,2.962,NA,Bister et al. (2000),none Shovelnose Sturgeon,English,linear,75,FL,RLP,4.75,NA,-4.266,3.33,NA,Quist et al. (1998),fork length @@ -165,6 +167,8 @@ Spotted Bass,metric,linear,75,TL,RLP,100,NA,-5.392,3.215,NA,Wiens et al. (1996), Spotted Bass (alabama subspecies),metric,linear,75,TL,RLP,100,NA,-5.598,3.2904,NA,Dicenzo et al. (1995),min.len not made clear (assumed same as Spotted Bass); same as Alabama Bass Spotted Gar,English,linear,75,TL,RLP,10,NA,-4.388,3.431,NA,Bister et al. (2000),none Spotted Gar,metric,linear,75,TL,RLP,250,NA,-6.551,3.431,NA,Bister et al. (2000),none +Spotted Sunfish,English,linear,75,TL,EmP,2.75,8,-3.3468,3.3343,NA,Bonvechio et al. (2023),also used RLP and EmP-quadratic but did not recommend +Spotted Sunfish,metric,linear,75,TL,EmP,70,200,-5.3739,3.3343,NA,Bonvechio et al. 
(2023),also used RLP and EmP-quadratic but did not recommend Striped Bass,English,linear,75,TL,RLP,6,NA,-3.358,3.007,NA,Brown and Murphy (1991b),none Striped Bass,metric,linear,75,TL,RLP,150,NA,-4.924,3.007,NA,Brown and Murphy (1991b),none Striped Bass (hybrid),English,linear,75,TL,RLP,4.5,NA,-3.448,3.139,NA,Brown and Murphy (1991b),same as Palmetto Bass and Striped Bass x White Bass @@ -174,8 +178,8 @@ Striped Bass X White Bass,metric,linear,75,TL,RLP,115,NA,-5.201,3.139,NA,Brown a Suwannee Bass,metric,quadratic,75,TL,EmP,110,NA,-0.106,-1.363,0.995,Bonvechio et al. (2010),none Tiger Muskellunge,English,linear,75,TL,RLP,9.5,NA,-4.095,3.337,NA,Rogers and Koupal (1997),none Tiger Muskellunge,metric,linear,75,TL,RLP,240,NA,-6.126,3.337,NA,Rogers and Koupal (1997),none -Utah Chub,metric,linear,75,TL,EmP,90,410,-4.938,3.031,NA,Black et al. (2021),none Utah Chub,English,linear,75,TL,EmP,4,16,-3.335,3.031,NA,Black et al. (2021),none +Utah Chub,metric,linear,75,TL,EmP,90,410,-4.938,3.031,NA,Black et al. (2021),none Walleye,English,linear,75,TL,RLP,6,NA,-3.642,3.18,NA,Murphy et al. (1990),none Walleye,metric,linear,75,TL,RLP,150,NA,-5.453,3.18,NA,Murphy et al. (1990),none Walleye (30-149 mm),English,linear,75,TL,RLP,1.25,NA,-3.431,2.869,NA,Flammang et al. 
(1999),none diff --git a/data/PSDlit.rdata b/data/PSDlit.rdata index d736973e2c275b41d348a2060c35f5ee043e75a5..cdff6ff86a260abfd907705000dd2c5c24070d6b 100644 GIT binary patch literal 2307 zcmV+e3Hs(4JhkSlQrel`a-yR!C7n}F!ILO z#>iewcB`_D)SxMiW|X;@Vqc=akv!&SBF^fRaDhJbG|$IC+3F?tkx2u6TbRBP>;9SMoZ%WayS4GevY(!ygKaQGqCEnW;u21-I#TE6I z0$o?D5IT^c8T}zb0@3L>GIiv}Qb|R7PS_-@Phk>a$34Fh0zYdghf(jYPF1gZE zIeblfH_Sahvo)-?4m=)eAwf?ymf6c18_;HqsIFDwVAlsXyxJ6wXKy<7mJB44$t%?J zq|0*tVzudbI_#Qo+rFn<39sS$O0@xhgR6d9Z)HlFUyY-v8TcYH;uu+XMMUk(I?1mG zGHf1*NCx&=5Vz5Ts`K;DB52sPW?GY6)pr}xGt3UCAqhjHrPV+xp<3)^yj*|gw}dCr z;eid&l7^l9h6wg0jgqnmmm9vf@A;v$t0F+o&*QKqT{rgjVQ=V#AE@L4UD;OZWK%R6 zuFU4TDO}f4IYqH@=6ldqa9c)^pY=R$fOg3boPEa=Zd&=xxZPlwy+NC@xW^7Z}Hi|^`|y5W%OwVH_cQ`zfB z++kXWTGJP9R(Dh(K{5;~2z4DocChBS@QUc+J;=M<42Kf zWR(Yj^}3n|WY**JS`;`P(zZ*@!`;A1r@v@!9=||`?0*x9 zroGyXcTv!7m1?h`ecK{9N*o0~ZZ<*8I}M<5)(s}$rVQ$k%Oy}P+y22FHSlV(GCuNv z%!4aOaAHx7nLgOdwsrI=f7MOsZQ{p?{+93p@g<^tL~TY3i~=7&MSjvU{c;MqdyK!y zc*M`+DlvYV=sdFv{0@r?dVLHBKf>Yx|H)tU=dY>$myp{#`lR)kWcoaB;K;*bdL_og z?g+#C*gov@esTQ~bKk<;!0$p5QF)f-`EA+sgL=b0^igk{QS8IsG|?$WfzL7?eT84x zLw!*f#LxS|^?pb65%h>YB)Y`>LvNhns59?3_#D%}$K(*-%zG1`W&6PAh>oy0<{5vh zn^2p0)EDui52y8u&oR3{EzAx4J*4;-S-vapO?#*}{E+^i|D*rC{x{GxU)YB|FxJ%~{ z`u2e7FESn!^Ne{#9FG`}bC=&Yh#z&Cu}nEo#Pubkv~OW-!s5M4Jo<(DB7XD%_hpGu_~qXhXy3xzAfG4yN@#`r zq7Kh((;w;%KNRn=xtTHdQ+vq2OcZs(IRO8-@3Fr|bjkF0EPj5jt#%E)@-yPULq0^m zV>sp*da#T5`ThBT@n6un0)0Fm=ZguB_$p;HUgQsZJP(|6h!6b$kME=C7wU)j(FcAG zWB%X|dE7JQEPmd=K6@Ud{kp^CY?c@IcZcCqr^xg3hEskXf}dskJgySMA&2t>a{{}- zF|SKZ4|bOrkNM%>OZa*74UQ=~o~1qgq}WhP zpP2fzc})5fBwL~wN6C(+{j`58WBWc`PZ6)Lr_~xDdvipK6#s3qKgRMZl8;g1Q6C+L zzI&F*_a8`hgXBtG^-njc=}nTKpgb4I|1zxwinprcAe~!eAGKYf7=KVpknC$(bF^0= z`=gY{B+(r0Pmx`n-y&Tv6Vi2jk} zf294N%9QU<34Qh6)Pr5bkMj=a_y*%=%y`SM$bS00f#^2Fk^eI$_m<+pIgR+x5Ab-7 zqF<;l;zu7gyQUl{{2>qa_m*Y8z{ov46j5ZCW~Zm8@l)XVW>dPI&=y|9-gaEq3HAQW z9QF$pC)B@z%g7e)l&L#f6}!@H`f(`j(#+E0ytt^<(={)%b5dUU6|W(KP`@5fvgg5P 
zWjU5u*Qq>>cV&<`nn{04eeSsOAZ$5D63HtNWqWDxyd;~M^NsyFEv>d!jiW%ux~5uc?yO>5eppT{ zEoG7^?fm(cE;r_9R5lp|ozn9-=rmbN3bkh37GBm}+W)b&xb1t%z`c;tJv^$$1sZVr z&aY}t@=}Jky63dwd+%hu>G+wLW-ri!OP&ue_gzU1_x2p=HjXrxKCa{^q%gB|kw7m$ zfoEAgX*w@w>74FddScE|Jh`H2R5y~B)R~ArYDoWoH|JUQE_8~W@)SAAqxl~Uu6m*^ zLwYRd@ER@b$hsr(0;?~K{CaE3!E4xoKgGgzqQf4N>jZL727zp3u(oXbf!K9r59^Nt zUmZO?fK1oh+;oa$O!u&!=@f(M6u1`paZr~$-@2>c%q95(9B7D0Ozj1#6imEd9hy@p dk^YO|ivC{n;lqc&EAn5x{s%T(ACb)=008z5wOIfF literal 2240 zcmV;x2tW59iwFP!000002EAF`QryTDmc9PrXILma8zAerl!fw+f0hgb-UVl|2L^lW z7~5mUyV%*S%8Ho=sYWABT9PUDCi5D}Wu7A!JV5dexyp5(AWkXnk^7-C9>_jk(Ibie+88sJIK(C2LTljpmx~#Y*&= z;tJ{-Q`Z`;j;_d7?6vH=boP8N*de&E>g~9~Y1=hN_6QqOST{^!*IrGww;ewu{L@w6 zX>}Cn`&xz2p%l&Oj}RiAUe6PmBR>&JD&F=Ym#_hi?RdUVd~sEVVaI+gm2ZM#sM{7wOyE1fQBaX4hz7tdX@=it{ zOA)!dPAsIoCX+6DQ1^cR*^zC#>1H)4)I+~50>kW(8j>h7T3VMv2{n@-=jH0Nu;T;* z9UfYDI>N9sy6(swL8GKB!sU7x>;z#X?7Ab7^YbL?2;WbF9oQRr5lWSu)RpaIP8v?T z?TdV_4afIAl~bH3XJG(s1$RXphk4JF256T;>Fs!d<7bs`B;8)e3nC#C(Y>1B(i_8WWC^du6IRK5bYd8fPLbf1RNTFuF&xN;YfYqer<%S8%`5gs>!%5APKB%j#$p7bdvF7Cbcjp@PDtG z(hAG(+p6gY^@V-Z73;h&;9;*!be2)z6~?2F@C$pWE9!#yc^|mmZ;3vD9?|eUaANj)HeN(TxO0>r8 zR{JJhV0y3%9QD7;`0>B%zkeLr2Y2Y4LBGCc`iqPQ#oVIq3yk6%<@7Lk zg-I>U3GA-)&3KSsy=wY{9N9fGC;U9;^}f$2>VSSdVC$&gifJ$NhdE+#W%K!v#qpTr z9x=OXeUmOSJ=jHD{2T!P3#Ny8DE}*c?jWvZqUay;E;AhQBR{^M?huatpq_{qb;rKr z&#^M&*>m5*oFI=!|4iv>-^~BHZTdr<$?lOkLHx)QJnSLwDpAx2`yBqUud%*HbjkF4 zBz}Hw)%u2B^%?O`kq^WVe8c2Whb4yde1Sh;bcs>en_;-k)_GrUvvvOb zA*%jHEd9fjW!+Gho?@*rp{^(D(r>@|4Qf0y1MjMfbDB>-#eKzeU#+#eI|PPULbb z=GMpQLS0Hkm&xz%Nmj>P?5ltMw2me6TOzp%pBk7n&S8dVf!1cprp|4V?w5(`t4guY=lU~6_lV&RVN+yhihNAu>Z1L>C0k#r z>mBK8Y${im3dLBV_;h~yt?(h`ruB8-^$z)7$3c2u!4}oDK`}~obx78wYnj%j$UbI4 z(@$w_k@C{B@H@I+C)@h|CHX_IrYWC?WcMcZ?*?6%(d#)sYuR(^hy9dp5?@b~s%{aF z_u?Okr~QBAx%`6U9+4b)*n161>rLXnXa2tWqX&iFYq&gv_&n?3i5ZZaHsKVx!l5GU;In-tGO^au4s zyr_GlZ_0r_WjOMn_aw{w5Kwrs=ftsHs+7xR`~)}IaE0HCY$s^jo1X7`kzSvh$9i$q zi}Y{FBDNiW*3=!ZJ6pnc!z2=RX>Mt8!8xnd4{M%hWppKI3mNI3)D>Pn1wO6{CuKR2 
z%Ij2~BwIqJs&m_#2$KqCE=p!_W@oapbd7r8lqO8)DQslaDXRFMw?SWC9I z_YMr6q1Y48qz3b;DaCKH$X_L1kY+P?y0Kqn<<|DWz#{>DLqeQ&t)wsHr=G_1bKH^|0mXxZWyEk_d*DN@1PoIXuyY8e%bWW zj~l$zXBfJ39snXrPuBKt|PPAMv%mp*Ibe2HRKVm0YJ#PH|*M&S~ z8cth2wq%*xYw73O!==hcjoJTid!5hTnNFx9PYv@^`{lstiijSp1$=;tdZOhCeC8R5 z;;_}3_3%M*=#Poxd-2{OQs_ysEu<9f9M%=xP&!+_IK&2H8LGqM5Fpd7E;pSaIn#&O zq3H~R=?u6Mg^6qlo^Q+7->=eq0S>jD*qPmysuWCoP#t@-D3N}5zo_3x-oJnUH%0!V Oum1uLe7Aem3C|?C^+RLL58BvccE^^AbA`?3j4V;9w4eF(Dy@ zM=;@$gkT$UnwwO0Ro8U)_8dDidnjn%8-CUQ)m8ugzvhYQvF-c&{QLbLk7t#qVdYBC zszyBYG=6M+=eo@vjKPx?o(7MPAonFCQNs(vcoM>&HvD;r5d+)cBm}ZBkkOM2wR+ox zDyA?lkpXH>Qxq*D3;=CH%xBn&;5G>)K^6%8AfM+O$x}cD!U)JihHBlWrZh1n460y8 z66ML#=wQD%nSmgU&r)*?*M@OfMaK;E>k|_|Qp(`FI6fgkn1gCw=mnCrwl3rz7qX;a zN$R(trC$Q6DJ+yE5ml7LhA4JG1-YCEOCdW9qy|Ju7=!7IqH+rb`z0MZjq8F77`TL< znu2O^l$W3;MM)|$Z|hgZ^dywToc+0Ccz?gDC{tF~3{cFtij`O#)onpx3+8eF%byWJ zLCdN#Ezi}m8#Y3*Y%dAQ7HgHFh3zz@GLoO1Zj)s$rm0_7lZyK+Gyo35G>{dm(pm;G zK$L}1k*F3*C9$m-E6Y%F*uMxG?;*-W)d?VF6}cp~aX`$c6t_uWK*{2D*0^^;br0y8 zmQfX;xhuRJy=yLAshk|zp{q*Hktka@aulj5Qz_w{lA$XW=vId=L2AP!jio=Uii_bp zb}HELKuuz!BvLrs`A)4nRhZ9A0}ZOec2&=oMh6GQ!-s*ICdocs0v8%oq%o8C!A*)M;%2)*5ZlLoiAUV=#Q9^K7ktbyZt6~t8#O=nK zk{rD4R&-e-5x*NJ6j&X)gQNFlL=9Tqu_z_7ThC8HNz&y>DD+OiEEa2R1+)$utuji~ zMo|tz)l$Ao5`!ZmQJ5%!oCwTwM#NlBg6oD!U9yE(XhczQP^u(-PQg;hunn0NWm%an zPwL>L#N|4ew(7YkB|Hk!Y01#MQ!C5qvxD z(UK|zdCidqixOHe0RpumY+y{Bn82>S9i$*_p58Wos30%2wIjU&dlmPl97tB6uHfA%>edw^; z`8_(V%WV36_lf3(YYIshOxqpthb1@-`v3I{E zk@gF*8!|wV)~?+J)k0B@P;L!RWi9ib}nJjV_3&kdSG26Y;o!in_qlT)fkDw@ zm)ohICI{X5TpNR?{7$-)Pv_j+*Obe-U$OPhbtf=r`h$nfFJRCqe*=Rf9{c&ub8e1X z)2{?JH~q+z*STLYJ3IMw?(ejtb8gzfnb*woO}}>R8+(<_i|O5Yj#D1z-1+=gcJ5Wb zbYJh3+nEO(>pf<@nPTgY2nPK0Z^?g~zi!HH&db60p#yZE3Srs(9Q<&64AFhA8FHs@ zVsai~>n@ACuan&IwZixnmC3hI=bCbtUC;7b-TC;GXJPi;5~uQg&VB#*XC9$+Gp{&c z&F7nS;NZgOt!Dn_8L#}gGnrf}n%DC@-#TE~=ftnk^$aex-sbY>nV%j650htp&z0SG z1KW3IdFpAt(8KBtYRRa7EBxduD73#Vx%<(VQD^$&C*0=Uh)(zZ{T7Tuk=O4d0~9^+ zyX#u^HlU6#eS7>Av>o|>^5vVKO}v33J8yaMufMwvvHFXmUm|~hh1xGfpY4$U5_ONi 
z`^p_VzK%L~-Afjt@DGgxkpIB8YtGyCQxtxV42DsVZ1=mLqs~K*;Rkl2$Uj_qPVC5U zP%GQ7dpkYP{}|n``EmarNDD0tUpc&F1)6l(>ai2T34 z9&6|n6rQH*f^47Y2$`-#fuj##K@}8ce205qyZgFJ)}q!V+3+3I#pG^b>$>`kKjb5) zN!Ox|n~V)n=nTTptEjV`804Rj?+X__y*2%D)bXc#8@j)!qad-$8PB5#(|dquy=5)LCm#5L@NpC%RDQC$|#Ap}>92FAO(16Zx2(!r!9jhyUZE zgP;7j+fk6|FUt517~*b6k=PNe(fk~FTga}v{~2}ufWn~RZat`zJs&*(fm{FNlK(K^!Wq6u`$4PE>Hvj?IsX8#ZoxA%1v zou%^lZ!{bWg|8(Kd}z+R>5CfnthZ1b+uzId+itk+0pw?Pimah=5HOBC3q>BHei>r? zM1jFL@*D1PB8uEe?cDJ-qV91XAK@De2SNeOaA*|iqI3Vx$lpT+KG$EdS9&5O(`f$6L=}M*ax3u=wyXetiiepitP159U7+ zW)J@_?j(vt4b;zO-!{Ty-R%3+4`<(H@eyM7h_d)>!9wE$Q77|%AM-yik?z!AAV2e$ z@E=n9v=eq;`Ww{qAi6Od=IsKgt*%b3NpJ#uc!WL z=EZgvm#fbv4m*PU_r@X}jd$Vv^Yl%z=k7;-<{x3kkJFBh`RosgI=5Ye+L&FtnP2#C zHv~puWH>Miv31d(Q@aO9+Wzu4QJBTKe}|D^c)SN#d_|ZXA(rpDCM9Bd6lD3QlgZ~N zvw^=rkt2qJP%Fz@UF#_y{%tJJG@e9GKmj7=!TV8!JEm+F&4|J{Z^a_FV@iU+9wak!Go8GDiHG47}QbLfSmSFdP0%3&Al z6TCFvm@xJ_WhrS1l zJj7x1C5Ayb^wRuYfNY%!*}1V-`|r8`HxIrY_fk7^=riSGFigDoqNn3NS_cb|oo7PR zZYK27d{Tf$I#^kNANlB4e>MAe++R3k_A;SUULVaX9Qv6)P3V2&+8Yk-`#lZ=G@o+l zGsn(>YzjNrL^@Bq{&3_!OSz+XXc#9(*cwErDS`;?X zc;&E});kWpw9nzt$Ku3{(jR;;U%B;(pO+xaHQkkLE27{f66*bGVAa*elFF%`9J-&`;|bhu+VheBUdl&BT3d zJ%`Mm494Cw+NqJa&-4q^Pgq`Ld7eZ6Z8sl2B%R8k*J#^x4xM&%z}eSW-1tqt8T8V= zj62e7TO=o{u#|zu~~YLjXg^5tu^oe!YS{sANrqLW`9cWhfDqa#2ou0TF2_X z@4xOf0(TCc#dqv++7HKmN&C#Q`#te;$K(0#cVf>fp3IZi-WD^iV@Ds1-Nx$H&nPUr z&%a-1G(;w>o4k+Fe7tCOdy(ejrS_kNh93JrhwK^@$sSkWm3|u)Xn@i=(dX~C?767x^c^vb4oqBX#@Ys&OuSifUK3au@YK%7-XSpHL&v zETO*`Pb_qH#S6MzPuNf)IkHpCXDqVeozJa==58vk&NWuI-YJ1Bo=h4$oHvgp>=*Ri zvRw&MbO=a!sAa6g4HXf4n7{@{B9zj^LGgmk6)&jPjkBBMZ^ zvKQcU7LwLQi>jj)?K=j@-L7Oy8&-zCx*v#>I$gl+VmqPyifS@`IH60rY;`d1eybBA zeb<4raG$%lD{9F9{fc}LrZS?Hys583a))tSaaCtgN|{z>`Q2En<*0h2bYCyt1$`8~p|s+Xp^x?|znjxywG4H@2}W1^Lz&6SXf ze=F0uOOXIkl)DN_QtR76a;t>*iMVre?G#FoN`Bc@HK#SDq*|-YQq$dixbR&DYw3(s zrRX^z*)NLa=wbR&Lf5Ymi<2UZRksUyDVl3EZ>fx0^|w7W8er+ZMyxtN^+TL$@;GBm zu zR^W;}haI9A3N67!H0LG05@fAwyykX6!9$_p8g z7O+Q7YMFH^lmPLrq=;RuSIg*zr3DqHlx!B_{(-;wXsINLnt|ZJYoiZQ89?vc8bEC}A;$T;7Ri 
zenoA*4s<3~{1nSMh0<2^)tB>J-dlS4oVRh#cYPqKq-WUr#X)W5)!+6mx*_^5IwP^$ zD2L|0XRZ!gEQdmN4v_MS(NhSPlD4i8OG#Z5(qh_Pw(9Y41vz?LOp;E>h?69Z6C-~u zb0UBT=sFQwplEG>3gz!J&DQMIVgQ-;2)#`fN9=qm`j9MC_B_6L}xJH?L+u)%W@DM{bv+J(8 zMwdm62aGn8GdEnN!U^+{EChL7H7QN>n#zeZZut}(vP@Trydi7MOA6n;fy9fsUtZBw z{?6rQ8ZnwvmXt&sm&#%y{b`_q^%E)<)3MyAD%0yt@ySn=ue=if=l%EJ@5A6d^7%ho K*>~9|h5!JY*{I0? literal 5185 zcmV-H6u#>piwFP!000002JJlybR5-nYg?A~tKaVGWAhmTA#xKNfB4vS%C04U1eS%+ zx;7+~_-Xe^J7{)hnVqo^H3>Nh#rpM(%fODTsE z%AqYK3E>+81p0pSG&4K<(e6qs2kV^WzIX4NcmMakx5w;^@4RSB@S>p4=Ue4#S+UZ$ zYBe7ET8G9qZ9Z$W&)2dN&sO+ad;ttsA51B-fmcTHB!WMk_;W8OhIYUy2vlh(Yff;~ znjMq6oW{6R7U+3H(~PV%1dK_!kmW1FI}|VhazGjcg@V{fod!CPMnM5`RL2fIZOCb9 zj}B%PS)D464iCyxSqL(im!9XiPK?WGCKh1OoSX!TRsr8D@F@ktJk$%)08o^54I%e? zkQEJU(zpffg9=DbW2F>@n4&B;!mvX+$meBP4%tnS29OnL9A>hbE<6+-R7~i0t_Lne zaVaxB4fWC}KS52&ic(_UIjGB-DX7SK=X0g-!9iWqrtP5_Vwn9pHezX1pACgQn9l>O ze^v(bR@Su{wb;sD*cij|y%eb0yj6}Cb<(uzNI}Bgp-VBQbxZSYF;-eS5|Jvp+=)U~{;P__%?7}V32QKGvQYAc@T_JA%!>ck|2mp{9S zOX0hBYuNEXpTJH@rE$88ojP{wu#lYr2GpgUx|u7F4)2k#yb|acQtUHjaFIQll7XZr z$f{uFi41e3J!ZXdkv%#j&zh=DEl*^4n3OEsY>2kj!eKL)pP;pF-XWgIw!PXkP$Bf5 zhDX3Ol!l?M+d7d4E=X~X%q+rmi~xNK^1d|R8B3tMD7ugaimQy4B!ovabxPH+DfWP} zxP91Cic7YAnyDHjV8 zRkfMQq;5eG zSu1v(qDxbvm~t2@v_7W{6PI;y9x6i$rDVnOl~s^aI0u#D*5Zxswxb;vq|%gFiXz34 zW?Tc}NE1%wHN$`zI}b11DRvyWJF#10!JH`MVOoZTbwWeFaK34Ipu#Cbm-BA6#kjZ~ zCkEOk z)y`0^EM!=b#X=FE zCbddeqxkg?o5r|CAs5NdWjuNe+qlLEY-olpt)G|gaB0m~qmS!%))KJz{czS|d2>0$ zdQ*o!UpxNXo4oL9ANfm8a(J4OMf@rEvI{K z?Q7}f-mlbp_qr1}wBo_X=jUu`>o}3$mN&#+_I15 z=W~7D(CxL=5D{Sk@65B-|_ zx9tm--qySlOkQz_?NcSJxSvZNflnZ|&vnDz^vzt)BYfRuN$+)vH@;q&ysA3=7K^W? 
zcg6L*ZZ%wv5Be5o-)%{z-$%XoPk#78Mz`vS3)aows`G~z$8L4Yx4?W|@cvdkb-{|y z$)9KIIc&C_isf5eyOrcS%k8qUIs6+9eY~Hb_N@MQ(hok1A{TW|?0e`1)RTGN3Agz- zq0{{TunnV7?3H`S0L4%I#`^XHEvWm`U)z5Q+KGZc_{^7|O1+9=yKi~!Z@#e}@&1b9 zpC*5Qjk?Z9AL&;A3ia*(-HUhZ`U2|N`$Mu2MZZHCpx~h$*PXNXM=1I<8H}JX+3q(# zMLkzMj33yIV*hyQS&1XRLLGd+zMYI;@L{%J+atk0%75_@6#UHtkM>;qZWRCEZy$Td zgHNN--6#Ct3m4vnV(IH!Zap-OPUdoj9zSEv&EK0rvF{xw3ZdXLl#zHow(p_mzxMk7 zpzwDu!#kad;@B#fBMScVMr@%|P;`c^3-f*Aqhz`gg^u2b71dCb%N-qf`K}u-S%*3% z$cAsAUaogLU)Q^Z{-FTjrd*G@zeG1gk<*DlFQJ|;;*ftvfsbGC#P-blQ1@RPXzBZe ziNeGyr$2*Y-0mS__4WfO_I+%|AAVKv4V-c3GabLxQTQCf_;V=stqY&M=A>_-$a%z| zSD~JFuMFcMTJtKCH_YuD|9U%Ngu-7WhW!$Xf4KFu$In=c!q_VhJlczTKCqoQ4u$UF zaY4i6T`0i)6#XjWAN|h@4u9a^Z%1Knzc`mWMAhAiVu>TzqJ<)T+sUr`{sr}Xo53&* zw|>;apAVmV->rXs$$z7`|AG7;zWyQ9eH9LaqxYk==Q4lBcw9t(O%%8sbq(Hd$-v8Z zqcD!bn?62Kpnbu>?Bh zrrwMGs~@6X?*9l;ciE8TqSuoL-dQwn{e*!d>owHL_xE%AcG0jsgo4~p zv9&A@LX_;O$gV6yfXQKV^Omk+S@m zub?Q;ct=+!0>SdZ!fPSrq2=rHAViB(tGUq1X{>5Y)lzQtt-FKe&U} zk<}*=4k$#_JbW*T@#lpdd7TZ1X+c487DrY;NxX3_Eij2USv^Vy$N%ZQXLp^MJdxG+ zq@UG+q@VdU*>>Wk=WpG7IO%8d3K(JgCj+c5Bv-ILk&N)?gDf7Bt*kyI{mhO62JfQr zC!nACD;Z+(Cg4h@XW}_#$7GPnFQ6ZeUAv<5sDQn!pCtXPo>(yP3YXW<`jUXWjtkhv z<+GswSz3PtY-92WxH?bkS(5h)0fQ_L1Z-pVLBPO$w9W|Fb_sQefPPk==OJHbLC!bv z(%{|q{_6hMlYZuB0RxtP97ahrU+_dS!1~-gltUm}CWc5zKwJT^HNVdE3LF5Iiqa|T0%U1#0SU(fc&(1Ld26&!W(9i0y zfB~Lg7HnblB-v)gr4^^FZVK4;BxNX|pS0!szHh}c3u&16|-;RH7d?*;(1p8 zl74P~0jb$fwqW9wi@*8ck&oVyc#8F_#Q9VYodRV0B8sAPw{V7EHXz{jiGT z+jxDjV374I0sS94`JNX~olOS#a{_YzahQ0M9*;(o0V@uyxZrh=*L4B?^w?zz==PHf z&b`d@B529Up`V?r1PoYpgTq$t7YhbhJr~f=`o4f6UcW6Eq+!x7V1P&wK7St4W6Yp{ zZ7g2|4Dh@W(6!!GPeecYS7w*QeRte`)dA%t>;5wL9IH3g-goA{%j(VCpYi^550hgK zv2*&|S1=cRR7!l4^@UpRRf)%0f45*oxt?ZmU9``m>|WOBdtc()EN++jzG?AIJce%v zU%&O{igtO7)$K;hG4~O69$sqw=Wb#Bf9@|?eM-!+duZYzcJHiv|7TBmYs2vW-ZJ+i zcE4-(_X|b-N32iPz0bcLpov<9=lPs?gq?2_KWFF0iu*nKLiZzuzBdw2X};`Z*WZ?) 
z*QcZRCvM|?>BkIK+~+@Tpa&caHq34hv--PeetC}7+h)hdVnd(vpAh!$k;xB^ksnQj zh6I!i(!l18n>LG|G2E4tb!|#9?bNN}*U)y#X{cT~|LCS|+s-QY=;n>$#~t^`N*3ho zv`y8}q9Y&&b-Og%=6^t0E*K8l#*Nj`dWJMjg|iY+Gt%A}Fim$|=G0ns?H`3^4m+G~ zyU*0~*;(qfO;s=G8=uWXg_6hRbe1q897Ig??lZC6(~zzh)AEo$zO71fbQD5m)-L+i zDoE|avZ7$A$zEGGSHRk(KBqjkLUQyC&jQ1snhcv0P&eq)n?SYn*f7v_x!|1xH9EKi z&xva3{rhAE&J?C)x`h3{I}MAMNXk_igja3o*xx!Q;87dhPpm@R7su$E6M!kD=vj81l zvKwM?$>m*HAt`ZB%0PA6PTY8ARm7V6N7+W#OJA2T0~N(HI4Y1&+@WHh7Bm)>drU)z zroo)Nb<2Vlb&trvyHS^>)S1e)#-68x&#K?S^{G>TGRDW`bXrp!w%$?|F)|EQ;$KH6 z_CzkEz18fX5ltt@K2`QUGcMk#XIz^iuQ~B-4rOSLL(M^aD!@(G+0>4O1?A7;<*U$I zM~SdFIgC`rlGqr~r=1P>ti`1DvZm@fif%au$bFt<+cB&Hea#?{r%bj$_{G7Y@`_q2 zel%q&rfLr`5q@h@GW%70cjG>LX;;)<|GVjZ!_y!oDHRSQ%kR!vt3D#KM$44`l0aF2X18=FH*qD;nBW?m-eYrffcTiIg!*o`5#Ll)V$+bE%dZCGl zc7e85Ln{5fO3z+R0mM+=CMZj7>={saC3;ZCXCKdAUxrln>#CYB+;vv^X4Ac+a9%xr ztfIw6GxI=kUe+qnqwE!Oo?qpaCdIg@UYFu>G|zn8Rvooj0nog!@@lRygAf;s0?zqK ze#O~R1*ukUOHSiwZ(EFPDo=BMqu!!w;vF)lYpOCUP3l@sN`Yyh&jN|P{ALgoWZY@? zoA~TIYwdn0C#xG2s7k74NI8&!Qd)rkn-LTWW)5bg0=BIL3X=WBDC@R+qNRZyCEu6Q zIWH9xfaA{wra|5}L-<3<$O7CdWb(gWOXDMBlBP?$a$`~s;x!p8-mEkMC9@!2kyPK) zcB@r0K1uvin}i*iqb4iJQ>r{kUN&kp6r?Q3 zNH`*=jO=z}fRQaft z_w_xt95QLK$BQ4#<;T93m3X{@<5sKW=Ivvb(FjMj@vEP;##CiPgsm}~w>Dg*!%6Fr z90UbZw Date: Tue, 31 Jan 2023 10:10:07 -0600 Subject: [PATCH 5/8] Update test-coverage.yaml --- .github/workflows/test-coverage.yaml | 60 ++++++++++++++-------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/.github/workflows/test-coverage.yaml b/.github/workflows/test-coverage.yaml index 5910c1a1..2c5bb502 100644 --- a/.github/workflows/test-coverage.yaml +++ b/.github/workflows/test-coverage.yaml @@ -1,48 +1,50 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help on: push: - branches: - - main - - master + branches: [main, master] pull_request: - branches: - - main - - master + branches: [main, master] name: test-coverage jobs: test-coverage: - runs-on: macOS-latest + runs-on: ubuntu-latest env: GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: r-lib/actions/setup-r@v1 + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true - - uses: r-lib/actions/setup-pandoc@v1 + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::covr + needs: coverage - - name: Query dependencies + - name: Test coverage run: | - install.packages('remotes') - saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) - writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") + covr::codecov( + quiet = FALSE, + clean = FALSE, + install_path = file.path(Sys.getenv("RUNNER_TEMP"), "package") + ) shell: Rscript {0} - - name: Cache R packages - uses: actions/cache@v2 - with: - path: ${{ env.R_LIBS_USER }} - key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} - restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- - - - name: Install dependencies + - name: Show testthat output + if: always() run: | - install.packages(c("remotes")) - remotes::install_deps(dependencies = TRUE) - remotes::install_cran("covr") - shell: Rscript {0} + ## -------------------------------------------------------------------- + find ${{ runner.temp }}/package -name 'testthat.Rout*' -exec cat '{}' \; || true + shell: bash - - name: Test coverage - run: covr::codecov() - shell: Rscript {0} + - name: Upload test results + if: failure() + uses: actions/upload-artifact@v3 + with: + name: coverage-test-failures + path: ${{ runner.temp }}/package From 9106a9df1e98773970b3ed11ddb474f31900abeb Mon 
Sep 17 00:00:00 2001 From: Derek Ogle Date: Tue, 31 Jan 2023 14:27:48 -0600 Subject: [PATCH 6/8] Addressing #58 --- DESCRIPTION | 2 +- NEWS.md | 3 +- R/psdCalc.R | 75 +++++++++++++++++++++---------- man/psdCalc.Rd | 41 ++++++++++------- tests/testthat/testthat_PSD.R | 83 ++++++++++++++++++++++++++++------- 5 files changed, 147 insertions(+), 57 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 84ba5afd..b3885727 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: FSA Version: 0.9.3.9000 -Date: 2023-1-15 +Date: 2023-1-30 Title: Simple Fisheries Stock Assessment Methods Description: A variety of simple fish stock assessment methods. Authors@R: c( diff --git a/NEWS.md b/NEWS.md index 109a35e3..be2ed311 100644 --- a/NEWS.md +++ b/NEWS.md @@ -13,7 +13,8 @@ * `alkIndivAge()`: Modified. Added a catch for `NA`s in the length sample. Also added a test. This addresses [#88](https://github.com/fishR-Core-Team/FSA/issues/88). * `confint.boot()`: Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by `grep()`ing for the `%` sign. This fixes an issue related to `car::Confint()` returning the `coef()` results for functions that have a `coef()` method but not for those that do not. Also updated tests to use results from `car::Boot()` rather than the old `car::bootCase()`. -* `PSDlit`: Added info for Redbreast Sunfish and Spotted Sunfish from Bonvecchio *et al.* (2023). This addresses [#100](https://github.com/fishR-Core-Team/FSA/issues/100)). +* `PSDcalc`: Modified. Changed code to allow for missing `species=` as long as `addLens=` is used. This allows the user to provide length categories for a species for which Gabelhouse lengths are not defined. Several new tests were added and some were modified to handle the changing message re: a missing `species=`. The documentation was modified accordingly. This (finally) addresses [#58](https://github.com/fishR-Core-Team/FSA/issues/58). +* `PSDlit`: Modified. 
Added info for Redbreast Sunfish and Spotted Sunfish from Bonvecchio *et al.* (2023). This addresses [#100](https://github.com/fishR-Core-Team/FSA/issues/100)). * `wSlit`: Modified documentation. Described the `RLP` and `EmP` acronyms and provided references for them. This addresses [#95](https://github.com/fishR-Core-Team/FSA/issues/95)). Added info for Redbreast Sunfish and Spotted Sunfish from Bonvecchio *et al.* (2023). This addresses [#100](https://github.com/fishR-Core-Team/FSA/issues/100)). # FSA 0.9.3 diff --git a/R/psdCalc.R b/R/psdCalc.R index e09f81b1..fa477e48 100644 --- a/R/psdCalc.R +++ b/R/psdCalc.R @@ -2,25 +2,26 @@ #' #' @description Convenience function for calculating (traditional) PSD-X and (incremental) PSD X-Y values for all Gabelhouse lengths and increments thereof. #' -#' @details Computes the (traditional) PSD-X and (incremental) PSD X-Y values, with associated confidence intervals, for each Gabelhouse lengthAll PSD-X and PSD X-Y values are printed if \code{what="all"} (DEFAULT), only PSD-X values are printed if \code{what="traditional"}, only PSD X-Y values are printed if \code{what="incremental"}, and nothing is printed (but the matrix is still returned) if \code{what="none"}. +#' @details Computes the (traditional) PSD-X and (incremental) PSD X-Y values, with associated confidence intervals, for each Gabelhouse length. All PSD-X and PSD X-Y values are printed if \code{what="all"} (DEFAULT), only PSD-X values are printed if \code{what="traditional"}, only PSD X-Y values are printed if \code{what="incremental"}, and nothing is printed (but the matrix is still returned) if \code{what="none"}. #' #' Confidence intervals can be computed with either the multinomial (Default) or binomial distribution as set in \code{method}See details in \code{\link{psdCI}} for more information. +#' This function may be used for species for which Gabelhouse length categories are not defined. 
In this case do not include a name in \code{species}, but define at least two lengths in \code{addLens} where the first category MUST be called \dQuote{stock}. #' -#' @param formula A formula of the form \code{~length} where \dQuote{length} generically represents a variable in \code{data} that contains the observed lengthsNote that this formula may only contain one variable and it must be numeric. +#' @param formula A formula of the form \code{~length} where \dQuote{length} generically represents a variable in \code{data} that contains the observed lengths. Note that this formula may only contain one variable and it must be numeric. #' @param data A data.frame that minimally contains the observed lengths given in the variable in \code{formula}. -#' @param species A string that contains the species name for which Gabelhouse lengths existSee \code{\link{psdVal}} for details. -#' @param units A string that indicates the type of units used for the lengthsChoices are \code{mm} for millimeters (DEFAULT), \code{cm} for centimeters, and \code{in} for inches. -#' @param what A string that indicates the type of PSD values that will be printedSee details. +#' @param species A string that contains the species name for which Gabelhouse lengths exist. See \code{\link{psdVal}} for details. See details for how to use this function for species for which Gabelhouse lengths are not defined. +#' @param units A string that indicates the type of units used for the lengths. Choices are \code{mm} for millimeters (DEFAULT), \code{cm} for centimeters, and \code{in} for inches. +#' @param what A string that indicates the type of PSD values that will be printed. See details. #' @param drop0Est A logical that indicates whether the PSD values that are zero should be dropped from the output. -#' @param method A character that identifies the confidence interval method to useSee details in \code{\link{psdCI}}. 
-#' @param addLens A numeric vector that contains minimum lengths for additional categoriesSee \code{\link{psdVal}} for details. -#' @param addNames A string vector that contains names for the additional lengths added with \code{addLens}See \code{\link{psdVal}} for details. +#' @param method A character that identifies the confidence interval method to use. See details in \code{\link{psdCI}}. +#' @param addLens A numeric vector that contains minimum lengths for additional categories. See \code{\link{psdVal}} for details. +#' @param addNames A string vector that contains names for the additional lengths added with \code{addLens}. See \code{\link{psdVal}} for details. #' @param justAdds A logical that indicates whether just the values related to the length sin \code{addLens} should be returned. #' @param conf.level A number that indicates the level of confidence to use for constructing confidence intervals (default is \code{0.95}). -#' @param showIntermediate A logical that indicates whether the number of fish in the category and the number of stock fish (i.e., \dQuote{intermediate} values) should be included in the returned matrixDefault is to not include these values. -#' @param digits A numeric that indicates the number of decimals to round the result toDefault is zero digits following the recommendation of Neumann and Allen (2007). +#' @param showIntermediate A logical that indicates whether the number of fish in the category and the number of stock fish (i.e., \dQuote{intermediate} values) should be included in the returned matrix. Default is to not include these values. +#' @param digits A numeric that indicates the number of decimals to round the result to. Default is zero digits following the recommendation of Neumann and Allen (2007). 
#' -#' @return A matrix with columns that contain the computed PSD-X or PSD X-Y values and associated confidence intervalsIf \code{showIntermediate=TRUE} then the number of fish in the category and the number of stock fish will also be shown. +#' @return A matrix with columns that contain the computed PSD-X or PSD X-Y values and associated confidence intervals. If \code{showIntermediate=TRUE} then the number of fish in the category and the number of stock fish will also be shown. #' #' @section Testing: Point estimate calculations match those constructed "by hand." #' @@ -32,13 +33,13 @@ #' #' @references Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}Chapman & Hall/CRC, Boca Raton, FL. #' -#' Guy, C.S., R.M. Neumann, and D.W. Willis2006New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS)Fisheries 31:86-87 [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] +#' Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] #' -#' Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson2006Proportional size distribution (PSD): A further refinement of population size structure index terminologyFisheries 32:348[Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.] +#' Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson2006Proportional size distribution (PSD): A further refinement of population size structure index terminology. Fisheries. 32:348. [Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.] #' -#' Neumann, R. M. and Allen, M. S2007Size structure. In Guy, C. S. and Brown, M. L., editors, Analysis and Interpretation of Freshwater Fisheries Data, Chapter 9, pages 375-421. 
American Fisheries Society, Bethesda, MD. +#' Neumann, R.M. and Allen, M.S. 2007. Size structure. In Guy, C.S. and Brown, M.L., editors, Analysis and Interpretation of Freshwater Fisheries Data, Chapter 9, pages 375-421. American Fisheries Society, Bethesda, MD. #' -#' Willis, D.W., B.R. Murphy, and C.S. Guy1993Stock density indices: development, use, and limitationsReviews in Fisheries Science 1:203-222[Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis\%20et\%20al.pdf.] +#' Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: development, use, and limitations. Reviews in Fisheries Science 1:203-222. [Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis\%20et\%20al.pdf.] #' #' @keywords hplot #' @@ -46,8 +47,8 @@ #' ## Random length data #' # suppose this is yellow perch to the nearest mm #' yepdf <- data.frame(yepmm=round(c(rnorm(100,mean=125,sd=15), -#' rnorm(50,mean=200,sd=25), -#' rnorm(20,mean=300,sd=40)),0), +#' rnorm(50,mean=200,sd=25), +#' rnorm(20,mean=300,sd=40)),0), #' species=rep("Yellow Perch",170)) #' psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) #' psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1,drop0Est=TRUE) @@ -83,6 +84,12 @@ #' ## Control the digits #' psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) #' +#' ## Working with a species not in PSDlit ... 
same data, but don't give species +#' psdCalc(~yepmm,data=yepdf,addLens=c("stock"=130,"quality"=200,"preferred"=250, +#' "memorable"=300,"trophy"=380)) +#' psdCalc(~yepmm,data=yepdf,addLens=c("stock"=130,"quality"=200, +#' "preferred"=250,"name1"=220)) +#' #' @export psdCalc psdCalc <- function(formula,data,species,units=c("mm","cm","in"), method=c("multinomial","binomial"),conf.level=0.95, @@ -96,13 +103,35 @@ psdCalc <- function(formula,data,species,units=c("mm","cm","in"), ## Check on conf.level iCheckConfLevel(conf.level) - ## make sure species is not missing - if (missing(species)) STOP("Must include a species name in 'species'.") + ## make sure species is not missing, or if it is that addLens have been given + if (!missing(species)) { + brks <- psdVal(species,units=units,incl.zero=FALSE, + addLens=addLens,addNames=addNames) + } else { + ## species is missing so must have and addLens + if (is.null(addLens)) STOP("Must include name in 'species' or lengths in 'addLens'.") + ## ... and addLens must have at least two values + if (length(addLens)<2) STOP("'addLens' must contain at least two length categories.") + ## ... and those lengths must be named ... 
+ if (is.null(names(addLens))) { + if (is.null(addNames)) + STOP("Category names must be defined in 'addLens' or given in 'addNames'.") + if (length(addLens)!=length(addNames)) + STOP("'addLens' and 'addNames' are different lengths.") + names(addLens) <- addNames + } + ## first category must be "stock" + if (names(addLens)[1]!="stock") STOP("First category name must be 'stock'.") + ## another category must be "quality" +# if (!("quality" %in% names(addLens))) +# STOP("One length category must be called 'quality'") + ## looks good so set brks to addLens (but make sure they are ordered) + brks <- addLens[order(addLens)] + } + ## find psd lengths for this species - brks <- psdVal(species,units=units,incl.zero=FALSE, - addLens=addLens,addNames=addNames) ## perform checks and initial preparation of the data.frame - dftemp <- iPrepData4PSD(formula,data,brks["stock"],units) + dftemp <- iPrepData4PSD(formula,data,brks[1],units) ## add the length categorization variable, don't drop unused levels dftemp <- lencat(formula,data=dftemp,breaks=brks,vname="lcatr", use.names=TRUE,droplevels=FALSE) @@ -111,7 +140,7 @@ psdCalc <- function(formula,data,species,units=c("mm","cm","in"), ## make the proportions table ptbl <- prop.table(table(dftemp$lcatr)) ## check to see if some fish are more than quality-sized - if (!cumsum(ptbl)[["quality"]]<1) WARN("No 'quality' or larger fish in sample.") + if (!cumsum(ptbl)[[2]]<1) WARN("No fish in larger than 'stock' categories.") ## compute all traditional and interval PSD values res <- iGetAllPSD(ptbl,n=n,method=method,conf.level=conf.level,digits=digits) ## decide to keep intermediate calculation columns or not (in first two columns) diff --git a/man/psdCalc.Rd b/man/psdCalc.Rd index 13704c34..f4359725 100644 --- a/man/psdCalc.Rd +++ b/man/psdCalc.Rd @@ -21,42 +21,43 @@ psdCalc( ) } \arguments{ -\item{formula}{A formula of the form \code{~length} where \dQuote{length} generically represents a variable in \code{data} that contains the observed 
lengthsNote that this formula may only contain one variable and it must be numeric.} +\item{formula}{A formula of the form \code{~length} where \dQuote{length} generically represents a variable in \code{data} that contains the observed lengths. Note that this formula may only contain one variable and it must be numeric.} \item{data}{A data.frame that minimally contains the observed lengths given in the variable in \code{formula}.} -\item{species}{A string that contains the species name for which Gabelhouse lengths existSee \code{\link{psdVal}} for details.} +\item{species}{A string that contains the species name for which Gabelhouse lengths exist. See \code{\link{psdVal}} for details. See details for how to use this function for species for which Gabelhouse lengths are not defined.} -\item{units}{A string that indicates the type of units used for the lengthsChoices are \code{mm} for millimeters (DEFAULT), \code{cm} for centimeters, and \code{in} for inches.} +\item{units}{A string that indicates the type of units used for the lengths. Choices are \code{mm} for millimeters (DEFAULT), \code{cm} for centimeters, and \code{in} for inches.} -\item{method}{A character that identifies the confidence interval method to useSee details in \code{\link{psdCI}}.} +\item{method}{A character that identifies the confidence interval method to use. See details in \code{\link{psdCI}}.} \item{conf.level}{A number that indicates the level of confidence to use for constructing confidence intervals (default is \code{0.95}).} -\item{addLens}{A numeric vector that contains minimum lengths for additional categoriesSee \code{\link{psdVal}} for details.} +\item{addLens}{A numeric vector that contains minimum lengths for additional categories. 
See \code{\link{psdVal}} for details.} -\item{addNames}{A string vector that contains names for the additional lengths added with \code{addLens}See \code{\link{psdVal}} for details.} +\item{addNames}{A string vector that contains names for the additional lengths added with \code{addLens}. See \code{\link{psdVal}} for details.} \item{justAdds}{A logical that indicates whether just the values related to the length sin \code{addLens} should be returned.} -\item{what}{A string that indicates the type of PSD values that will be printedSee details.} +\item{what}{A string that indicates the type of PSD values that will be printed. See details.} \item{drop0Est}{A logical that indicates whether the PSD values that are zero should be dropped from the output.} -\item{showIntermediate}{A logical that indicates whether the number of fish in the category and the number of stock fish (i.e., \dQuote{intermediate} values) should be included in the returned matrixDefault is to not include these values.} +\item{showIntermediate}{A logical that indicates whether the number of fish in the category and the number of stock fish (i.e., \dQuote{intermediate} values) should be included in the returned matrix. Default is to not include these values.} -\item{digits}{A numeric that indicates the number of decimals to round the result toDefault is zero digits following the recommendation of Neumann and Allen (2007).} +\item{digits}{A numeric that indicates the number of decimals to round the result to. Default is zero digits following the recommendation of Neumann and Allen (2007).} } \value{ -A matrix with columns that contain the computed PSD-X or PSD X-Y values and associated confidence intervalsIf \code{showIntermediate=TRUE} then the number of fish in the category and the number of stock fish will also be shown. +A matrix with columns that contain the computed PSD-X or PSD X-Y values and associated confidence intervals. 
If \code{showIntermediate=TRUE} then the number of fish in the category and the number of stock fish will also be shown. } \description{ Convenience function for calculating (traditional) PSD-X and (incremental) PSD X-Y values for all Gabelhouse lengths and increments thereof. } \details{ -Computes the (traditional) PSD-X and (incremental) PSD X-Y values, with associated confidence intervals, for each Gabelhouse lengthAll PSD-X and PSD X-Y values are printed if \code{what="all"} (DEFAULT), only PSD-X values are printed if \code{what="traditional"}, only PSD X-Y values are printed if \code{what="incremental"}, and nothing is printed (but the matrix is still returned) if \code{what="none"}. +Computes the (traditional) PSD-X and (incremental) PSD X-Y values, with associated confidence intervals, for each Gabelhouse length. All PSD-X and PSD X-Y values are printed if \code{what="all"} (DEFAULT), only PSD-X values are printed if \code{what="traditional"}, only PSD X-Y values are printed if \code{what="incremental"}, and nothing is printed (but the matrix is still returned) if \code{what="none"}. Confidence intervals can be computed with either the multinomial (Default) or binomial distribution as set in \code{method}See details in \code{\link{psdCI}} for more information. +This function may be used for species for which Gabelhouse length categories are not defined. In this case do not include a name in \code{species}, but define at least two lengths in \code{addLens} where the first category MUST be called \dQuote{stock}. } \section{Testing}{ Point estimate calculations match those constructed "by hand." 
@@ -70,8 +71,8 @@ Confidence intervals can be computed with either the multinomial (Default) or bi ## Random length data # suppose this is yellow perch to the nearest mm yepdf <- data.frame(yepmm=round(c(rnorm(100,mean=125,sd=15), - rnorm(50,mean=200,sd=25), - rnorm(20,mean=300,sd=40)),0), + rnorm(50,mean=200,sd=25), + rnorm(20,mean=300,sd=40)),0), species=rep("Yellow Perch",170)) psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1,drop0Est=TRUE) @@ -107,17 +108,23 @@ psdCalc(~yepmm,data=yepdf,species="Yellow perch",what="incremental",showInterm=T ## Control the digits psdCalc(~yepmm,data=yepdf,species="Yellow perch",digits=1) +## Working with a species not in PSDlit ... same data, but don't give species +psdCalc(~yepmm,data=yepdf,addLens=c("stock"=130,"quality"=200,"preferred"=250, + "memorable"=300,"trophy"=380)) +psdCalc(~yepmm,data=yepdf,addLens=c("stock"=130,"quality"=200, + "preferred"=250,"name1"=220)) + } \references{ Ogle, D.H. 2016. \href{https://fishr-core-team.github.io/fishR/pages/books.html#introductory-fisheries-analyses-with-r}{Introductory Fisheries Analyses with R}Chapman & Hall/CRC, Boca Raton, FL. -Guy, C.S., R.M. Neumann, and D.W. Willis2006New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS)Fisheries 31:86-87 [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] +Guy, C.S., R.M. Neumann, and D.W. Willis. 2006. New terminology for proportional stock density (PSD) and relative stock density (RSD): proportional size structure (PSS). Fisheries 31:86-87. [Was (is?) from http://pubstorage.sdstate.edu/wfs/415-F.pdf.] -Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. Anderson2006Proportional size distribution (PSD): A further refinement of population size structure index terminologyFisheries 32:348[Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.] +Guy, C.S., R.M. Neumann, D.W. Willis, and R.O. 
Anderson2006Proportional size distribution (PSD): A further refinement of population size structure index terminology. Fisheries. 32:348. [Was (is?) from http://pubstorage.sdstate.edu/wfs/450-F.pdf.] -Neumann, R. M. and Allen, M. S2007Size structure. In Guy, C. S. and Brown, M. L., editors, Analysis and Interpretation of Freshwater Fisheries Data, Chapter 9, pages 375-421. American Fisheries Society, Bethesda, MD. +Neumann, R.M. and Allen, M.S. 2007. Size structure. In Guy, C.S. and Brown, M.L., editors, Analysis and Interpretation of Freshwater Fisheries Data, Chapter 9, pages 375-421. American Fisheries Society, Bethesda, MD. -Willis, D.W., B.R. Murphy, and C.S. Guy1993Stock density indices: development, use, and limitationsReviews in Fisheries Science 1:203-222[Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis\%20et\%20al.pdf.] +Willis, D.W., B.R. Murphy, and C.S. Guy. 1993. Stock density indices: development, use, and limitations. Reviews in Fisheries Science 1:203-222. [Was (is?) from http://web1.cnre.vt.edu/murphybr/web/Readings/Willis\%20et\%20al.pdf.] } \seealso{ See \code{\link{psdVal}}, \code{\link{psdPlot}}, \code{\link{psdAdd}}, \code{\link{PSDlit}}, \code{\link{tictactoe}}, \code{\link{lencat}}, and \code{\link{rcumsum}} for related functionality. 
diff --git a/tests/testthat/testthat_PSD.R b/tests/testthat/testthat_PSD.R index f4f54d2b..8be2e853 100644 --- a/tests/testthat/testthat_PSD.R +++ b/tests/testthat/testthat_PSD.R @@ -98,6 +98,9 @@ test_that("psdCI() messages",{ }) test_that("psdCalc() messages",{ + ## species name does not exist in PSDlit + expect_error(psdCalc(~tl,data=tmp,species="Slimy Sculpin"), + "Gabelhouse lengths do not exist for Slimy Sculpin") ## get Gabelhouse lengths for Yellow Perch ghl <- psdVal("Yellow perch") ## restrict data.frame to no fish @@ -111,11 +114,7 @@ test_that("psdCalc() messages",{ ## restrict data.frame to no >=quality fish tmp <- subset(df,tl Date: Tue, 31 Jan 2023 17:23:26 -0600 Subject: [PATCH 7/8] Update R-CMD-check.yaml --- .github/workflows/R-CMD-check.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml index fcbcebec..bce53238 100644 --- a/.github/workflows/R-CMD-check.yaml +++ b/.github/workflows/R-CMD-check.yaml @@ -2,9 +2,9 @@ # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help on: push: - branches: [main, master] + branches: [main, master, dev] pull_request: - branches: [main, master] + branches: [main, master, dev] name: R-CMD-check From ed80ad2f75520ca37f015c7cd82d164d823a5b2b Mon Sep 17 00:00:00 2001 From: Derek Ogle Date: Wed, 1 Feb 2023 09:25:38 -0600 Subject: [PATCH 8/8] Preparing v0.9.4 --- DESCRIPTION | 10 +- NEWS.md | 5 +- cran-comments/cran-comments-v0_9_4.md | 11 + docs/LICENSE-text.html | 8 +- docs/authors.html | 27 +- docs/index.html | 19 +- docs/news/index.html | 19 +- docs/pkgdown.yml | 4 +- docs/reference/BluegillJL.html | 8 +- docs/reference/BrookTroutTH.html | 8 +- docs/reference/ChinookArg.html | 8 +- docs/reference/CodNorwegian.html | 8 +- docs/reference/CutthroatAL.html | 8 +- docs/reference/Ecoli.html | 8 +- docs/reference/FSA-internals.html | 8 +- docs/reference/FSA.html | 8 +- docs/reference/Mirex.html | 8 +- docs/reference/PSDlit.html | 8 +- docs/reference/PikeNY.html | 8 +- docs/reference/PikeNYPartial1.html | 8 +- docs/reference/SMBassLS.html | 8 +- docs/reference/SMBassWB.html | 8 +- docs/reference/Schnute.html | 8 +- docs/reference/SpotVA1.html | 8 +- docs/reference/Summarize.html | 8 +- docs/reference/WR79.html | 8 +- docs/reference/WSlit.html | 25 +- docs/reference/WhitefishLC.html | 8 +- docs/reference/addZeroCatch.html | 8 +- docs/reference/ageBias.html | 8 +- docs/reference/agePrecision.html | 8 +- docs/reference/alkAgeDist.html | 8 +- docs/reference/alkIndivAge.html | 62 ++--- docs/reference/alkMeanVar.html | 8 +- docs/reference/alkPlot.html | 8 +- docs/reference/binCI.html | 8 +- docs/reference/capHistConvert.html | 8 +- docs/reference/capHistSum.html | 8 +- docs/reference/catchCurve.html | 8 +- docs/reference/chapmanRobson.html | 12 +- docs/reference/col2rgbt.html | 8 +- docs/reference/depletion.html | 8 +- docs/reference/dunnTest.html | 8 +- docs/reference/expandCounts.html | 130 ++++----- docs/reference/expandLenFreq.html | 8 +- 
docs/reference/extraTests.html | 8 +- docs/reference/fact2num.html | 8 +- docs/reference/fishR.html | 8 +- docs/reference/geomean.html | 8 +- docs/reference/growthModels.html | 362 +++++++++++--------------- docs/reference/headtail.html | 8 +- docs/reference/hist.formula.html | 8 +- docs/reference/histFromSum.html | 8 +- docs/reference/hyperCI.html | 8 +- docs/reference/index.html | 8 +- docs/reference/knitUtil.html | 8 +- docs/reference/ksTest.html | 8 +- docs/reference/lagratio.html | 8 +- docs/reference/lencat.html | 8 +- docs/reference/logbtcf.html | 8 +- docs/reference/lwCompPreds.html | 8 +- docs/reference/metaM.html | 26 +- docs/reference/mrClosed.html | 8 +- docs/reference/mrOpen.html | 14 +- docs/reference/nlsBoot.html | 8 +- docs/reference/nlsTracePlot.html | 8 +- docs/reference/perc.html | 8 +- docs/reference/plotAB.html | 8 +- docs/reference/poiCI.html | 8 +- docs/reference/psdAdd.html | 8 +- docs/reference/psdCI.html | 12 +- docs/reference/psdCalc.html | 53 ++-- docs/reference/psdPlot.html | 8 +- docs/reference/psdVal.html | 8 +- docs/reference/rSquared.html | 8 +- docs/reference/rcumsum.html | 8 +- docs/reference/removal.html | 8 +- docs/reference/se.html | 8 +- docs/reference/srStarts.html | 8 +- docs/reference/sumTable.html | 8 +- docs/reference/tictactoe.html | 8 +- docs/reference/validn.html | 8 +- docs/reference/vbStarts.html | 8 +- docs/reference/wrAdd.html | 8 +- docs/reference/wsVal.html | 113 ++++---- inst/CITATION | 18 +- 86 files changed, 715 insertions(+), 751 deletions(-) create mode 100644 cran-comments/cran-comments-v0_9_4.md diff --git a/DESCRIPTION b/DESCRIPTION index b3885727..b20c7b94 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,15 +1,15 @@ Package: FSA -Version: 0.9.3.9000 -Date: 2023-1-30 +Version: 0.9.4 +Date: 2023-2-1 Title: Simple Fisheries Stock Assessment Methods Description: A variety of simple fish stock assessment methods. 
Authors@R: c( - person("Derek", "Ogle", email="DerekOgle51@gmail.com", + person(c("Derek","H."), "Ogle", email="DerekOgle51@gmail.com", role=c("aut","cre"), comment=c(ORCID="0000-0002-0370-9299")), - person("Jason", "Doll", email="jason.doll@fmarion.edu", + person(c("Jason","C."), "Doll", email="jason.doll@fmarion.edu", role=c("aut")), - person("Powell", "Wheeler", email="powell.wheeler@gmail.com", + person(c("A.","Powell"), "Wheeler", email="powell.wheeler@gmail.com", role="aut"), person("Alexis", "Dinno", email="alexis.dinno@pdx.edu", role="aut", diff --git a/NEWS.md b/NEWS.md index be2ed311..cb73bd7a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,4 +1,4 @@ -# FSA 0.9.3.9000 +# FSA 0.9.4 * Changes related to moving to fishR-Core-Team * Updated sticker. * Changed DHO e-mail address (in DESCRIPTION and in all author fields of the documentation). Partially address [#86](https://github.com/fishR-Core-Team/FSA/issues/86). @@ -10,6 +10,7 @@ * Added links to CSV files for all data sets. This addresses [#96](https://github.com/fishR-Core-Team/FSA/issues/96). * Changed theme in `_pkgdown.yml` to match that of `FSAdata` and more closely match `fishR`. * Removed most recent dates from NEWS file as `pkgdown` picks up the CRAN release date to add. + * Updated `CITATION` (to match that required for next version of R). * `alkIndivAge()`: Modified. Added a catch for `NA`s in the length sample. Also added a test. This addresses [#88](https://github.com/fishR-Core-Team/FSA/issues/88). * `confint.boot()`: Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by `grep()`ing for the `%` sign. This fixes an issue related to `car::Confint()` returning the `coef()` results for functions that have a `coef()` method but not for those that do not. Also updated tests to use results from `car::Boot()` rather than the old `car::bootCase()`. 
@@ -161,7 +162,7 @@ * Updated tests for changes in the `fishmethods` package (`vblrt()` replaced with `growthlrt()` and `T=` replaced with `TC=` in `M.empirical()`) per CRAN request. # FSA 0.8.21 -* Added a webpage. Setup Travis-CI to handle updates ([See this](https://www.datacamp.com/community/tutorials/cd-package-docs-pkgdown-travis)). +* Added a webpage. Setup Travis-CI to handle updates. * Added a hex sticker logo. * Added `withr` to Imports (see usages below). * Added `Encoding: UTF-8` to DESCRIPTION. diff --git a/cran-comments/cran-comments-v0_9_4.md b/cran-comments/cran-comments-v0_9_4.md new file mode 100644 index 00000000..a78609b6 --- /dev/null +++ b/cran-comments/cran-comments-v0_9_4.md @@ -0,0 +1,11 @@ +* This updates the existing FSA package on CRAN. + +## Notes +* The maintainer's e-mail address has been changed from derek at derekogle.com to DerekOgle51 at gmail.com. +* There may be a note about "fishR" being misspelled in the description. This is not a misspelling. + +## Testing Environments +* My Windows machine. +* Win Builder -- old-release, release, and development. +* Mac Builder +* GitHub Action (R-CMD-check.yaml) diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html index 13548f0f..982fc39e 100644 --- a/docs/LICENSE-text.html +++ b/docs/LICENSE-text.html @@ -1,5 +1,5 @@ -License • FSALicense • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/authors.html b/docs/authors.html index 1635200b..1beb37a3 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -1,5 +1,5 @@ -Authors and Citation • FSAAuthors and Citation • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    @@ -91,11 +94,11 @@

    Citation

    diff --git a/docs/index.html b/docs/index.html index 01ba9bcf..bceaf4fa 100644 --- a/docs/index.html +++ b/docs/index.html @@ -14,10 +14,10 @@ - - + + - + Changelog • FSAChangelog • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    -

    FSA 0.9.3.9000

    +

    FSA 0.9.4

    • Changes related to moving to fishR-Core-Team
      • Updated sticker.
      • Changed DHO e-mail address (in DESCRIPTION and in all author fields of the documentation). Partially address #86.
      • @@ -67,11 +67,18 @@

        FSA 0.9.3.90
      • Added links to CSV files for all data sets. This addresses #96.
      • Changed theme in _pkgdown.yml to match that of FSAdata and more closely match fishR.
      • Removed most recent dates from NEWS file as pkgdown picks up the CRAN release date to add.
      • +
      • Updated CITATION (to match that required for next version of R).
    • alkIndivAge(): Modified. Added a catch for NAs in the length sample. Also added a test. This addresses #88.
    • confint.boot(): Modified. Changed hard-coding of columns that contained the confidence interval values to find those columns by grep()ing for the % sign. This fixes an issue related to car::Confint() returning the coef() results for functions that have a coef() method but not for those that do not. Also updated tests to use results from car::Boot() rather than the old car::bootCase().
    • +
    • +psdCalc: Modified. Changed code to allow for missing species= as long as addLens= is used. This allows the user to provide length categories for a species for which Gabelhouse lengths are not defined. Several new tests were added and some were modified to handle the changing message re: a missing species=. The documentation was modified accordingly. This (finally) addresses #58.
    • +
    • +PSDlit: Modified. Added info for Redbreast Sunfish and Spotted Sunfish from Bonvecchio et al. (2023). This addresses #100.
    • +
    • +WSlit: Modified documentation. Described the RLP and EmP acronyms and provided references for them. This addresses #95. Added info for Redbreast Sunfish and Spotted Sunfish from Bonvecchio et al. (2023). This addresses #100.

    FSA 0.9.3

    CRAN release: 2022-02-18

    @@ -310,7 +317,7 @@

    FSA 0.8.22

    FSA 0.8.21

    CRAN release: 2018-11-03

    -
    diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml index 21e98315..9f8a9813 100644 --- a/docs/pkgdown.yml +++ b/docs/pkgdown.yml @@ -1,8 +1,8 @@ pandoc: 2.19.2 -pkgdown: 2.0.6 +pkgdown: 2.0.7 pkgdown_sha: ~ articles: {} -last_built: 2022-12-19T16:19Z +last_built: 2023-02-01T15:03Z urls: reference: https://fishr-core-team.github.io/FSA/reference article: https://fishr-core-team.github.io/FSA/articles diff --git a/docs/reference/BluegillJL.html b/docs/reference/BluegillJL.html index 6e994c16..d9552803 100644 --- a/docs/reference/BluegillJL.html +++ b/docs/reference/BluegillJL.html @@ -1,5 +1,5 @@ -Capture histories (2 samples) of Bluegill from Jewett Lake, MI. — BluegillJL • FSACapture histories (2 samples) of Bluegill from Jewett Lake, MI. — BluegillJL • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/BrookTroutTH.html b/docs/reference/BrookTroutTH.html index f3b370ed..bc8a32fa 100644 --- a/docs/reference/BrookTroutTH.html +++ b/docs/reference/BrookTroutTH.html @@ -1,5 +1,5 @@ -Catch-at-age for Tobin Harbor, Isle Royale Brook Trout. — BrookTroutTH • FSACatch-at-age for Tobin Harbor, Isle Royale Brook Trout. — BrookTroutTH • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/ChinookArg.html b/docs/reference/ChinookArg.html index cc118caa..aef0fb9e 100644 --- a/docs/reference/ChinookArg.html +++ b/docs/reference/ChinookArg.html @@ -1,5 +1,5 @@ -Lengths and weights for Chinook Salmon from three locations in Argentina. — ChinookArg • FSALengths and weights for Chinook Salmon from three locations in Argentina. — ChinookArg • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/CodNorwegian.html b/docs/reference/CodNorwegian.html index 00cd016a..901a6154 100644 --- a/docs/reference/CodNorwegian.html +++ b/docs/reference/CodNorwegian.html @@ -1,5 +1,5 @@ -Stock and recruitment data for Norwegian cod, 1937-1960. — CodNorwegian • FSAStock and recruitment data for Norwegian cod, 1937-1960. — CodNorwegian • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/CutthroatAL.html b/docs/reference/CutthroatAL.html index 147f0a08..86473a13 100644 --- a/docs/reference/CutthroatAL.html +++ b/docs/reference/CutthroatAL.html @@ -1,5 +1,5 @@ -Capture histories (9 samples) of Cutthroat Trout from Auke Lake. — CutthroatAL • FSACapture histories (9 samples) of Cutthroat Trout from Auke Lake. — CutthroatAL • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/Ecoli.html b/docs/reference/Ecoli.html index c027bb61..4fb55844 100644 --- a/docs/reference/Ecoli.html +++ b/docs/reference/Ecoli.html @@ -1,5 +1,5 @@ -Population growth of Escherichia coli. — Ecoli • FSAPopulation growth of Escherichia coli. — Ecoli • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/FSA-internals.html b/docs/reference/FSA-internals.html index 95a345c5..09064ca5 100644 --- a/docs/reference/FSA-internals.html +++ b/docs/reference/FSA-internals.html @@ -1,5 +1,5 @@ -Internal functions. — .onAttach • FSAInternal functions. — .onAttach • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/FSA.html b/docs/reference/FSA.html index 332b6d60..00e36e7d 100644 --- a/docs/reference/FSA.html +++ b/docs/reference/FSA.html @@ -1,5 +1,5 @@ -Fisheries stock assessment methods and data. — FSA • FSAFisheries stock assessment methods and data. — FSA • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/Mirex.html b/docs/reference/Mirex.html index 7b04bec6..f2cc14d9 100644 --- a/docs/reference/Mirex.html +++ b/docs/reference/Mirex.html @@ -1,5 +1,5 @@ -Mirex concentration, weight, capture year, and species of Lake Ontario salmon. — Mirex • FSAMirex concentration, weight, capture year, and species of Lake Ontario salmon. — Mirex • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/PSDlit.html b/docs/reference/PSDlit.html index 1a502f25..d25c6853 100644 --- a/docs/reference/PSDlit.html +++ b/docs/reference/PSDlit.html @@ -1,5 +1,5 @@ -Gabelhouse five-cell length categories for various species. — PSDlit • FSAGabelhouse five-cell length categories for various species. — PSDlit • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/PikeNY.html b/docs/reference/PikeNY.html index 5199d543..22899efd 100644 --- a/docs/reference/PikeNY.html +++ b/docs/reference/PikeNY.html @@ -1,5 +1,5 @@ -Summarized multiple mark-recapture data for all Northern Pike from Buckhorn Marsh, NY. — PikeNY • FSASummarized multiple mark-recapture data for all Northern Pike from Buckhorn Marsh, NY. — PikeNY • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/PikeNYPartial1.html b/docs/reference/PikeNYPartial1.html index e93e4fa6..d7ac669c 100644 --- a/docs/reference/PikeNYPartial1.html +++ b/docs/reference/PikeNYPartial1.html @@ -1,5 +1,5 @@ -Capture histories (4 samples), in capture history format, of a subset of Northern Pike from Buckhorn Marsh, NY. — PikeNYPartial1 • FSACapture histories (4 samples), in capture history format, of a subset of Northern Pike from Buckhorn Marsh, NY. — PikeNYPartial1 • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/SMBassLS.html b/docs/reference/SMBassLS.html index a4d85259..00d1d9f4 100644 --- a/docs/reference/SMBassLS.html +++ b/docs/reference/SMBassLS.html @@ -1,5 +1,5 @@ -Catch-effort data for Little Silver Lake (Ont) Smallmouth Bass. — SMBassLS • FSACatch-effort data for Little Silver Lake (Ont) Smallmouth Bass. — SMBassLS • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/SMBassWB.html b/docs/reference/SMBassWB.html index 9b2f8c4d..d396b047 100644 --- a/docs/reference/SMBassWB.html +++ b/docs/reference/SMBassWB.html @@ -1,5 +1,5 @@ -Growth increment data for West Bearskin Lake, MN, Smallmouth Bass. — SMBassWB • FSAGrowth increment data for West Bearskin Lake, MN, Smallmouth Bass. — SMBassWB • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/Schnute.html b/docs/reference/Schnute.html index b41d8c45..a23a7f14 100644 --- a/docs/reference/Schnute.html +++ b/docs/reference/Schnute.html @@ -1,5 +1,5 @@ -The four-parameter growth function from Schnute (1981). — Schnute • FSAThe four-parameter growth function from Schnute (1981). — Schnute • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/SpotVA1.html b/docs/reference/SpotVA1.html index 0a8b8f32..ea37f77a 100644 --- a/docs/reference/SpotVA1.html +++ b/docs/reference/SpotVA1.html @@ -1,5 +1,5 @@ -Age and length of spot. — SpotVA1 • FSAAge and length of spot. — SpotVA1 • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/Summarize.html b/docs/reference/Summarize.html index 3eb6a765..1626a4e4 100644 --- a/docs/reference/Summarize.html +++ b/docs/reference/Summarize.html @@ -1,5 +1,5 @@ -Summary statistics for a numeric variable. — Summarize • FSASummary statistics for a numeric variable. — Summarize • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/WR79.html b/docs/reference/WR79.html index 5627c25f..2136bbc9 100644 --- a/docs/reference/WR79.html +++ b/docs/reference/WR79.html @@ -1,5 +1,5 @@ -Ages and lengths for a hypothetical sample from Westerheim and Ricker (1979). — WR79 • FSAAges and lengths for a hypothetical sample from Westerheim and Ricker (1979). — WR79 • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/WSlit.html b/docs/reference/WSlit.html index 6fbba84f..6ce8ace5 100644 --- a/docs/reference/WSlit.html +++ b/docs/reference/WSlit.html @@ -1,5 +1,5 @@ -All known standard weight equations. — WSlit • FSAAll known standard weight equations. — WSlit • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/WhitefishLC.html b/docs/reference/WhitefishLC.html index e60610ba..c0450238 100644 --- a/docs/reference/WhitefishLC.html +++ b/docs/reference/WhitefishLC.html @@ -1,5 +1,5 @@ -Assigned ages from two readers on three structures for Lake Whitefish from Lake Champlain. — WhitefishLC • FSAAssigned ages from two readers on three structures for Lake Whitefish from Lake Champlain. — WhitefishLC • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/addZeroCatch.html b/docs/reference/addZeroCatch.html index ca2c7904..8702eb4f 100644 --- a/docs/reference/addZeroCatch.html +++ b/docs/reference/addZeroCatch.html @@ -1,5 +1,5 @@ -Adds zeros for catches of species not collected in some sampling events. — addZeroCatch • FSAAdds zeros for catches of species not collected in some sampling events. — addZeroCatch • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/ageBias.html b/docs/reference/ageBias.html index bccc205d..93c04d04 100644 --- a/docs/reference/ageBias.html +++ b/docs/reference/ageBias.html @@ -1,5 +1,5 @@ -Compute and view possible differences between paired sets of ages. — ageBias • FSACompute and view possible differences between paired sets of ages. — ageBias • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/agePrecision.html b/docs/reference/agePrecision.html index ec55d9f0..01b7c269 100644 --- a/docs/reference/agePrecision.html +++ b/docs/reference/agePrecision.html @@ -1,5 +1,5 @@ -Compute measures of precision among sets of ages. — agePrecision • FSACompute measures of precision among sets of ages. — agePrecision • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/alkAgeDist.html b/docs/reference/alkAgeDist.html index 96fe9f88..8edd52e1 100644 --- a/docs/reference/alkAgeDist.html +++ b/docs/reference/alkAgeDist.html @@ -1,5 +1,5 @@ -Proportions-at-age from an age-length key — alkAgeDist • FSAProportions-at-age from an age-length key — alkAgeDist • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/alkIndivAge.html b/docs/reference/alkIndivAge.html index 9c8b7588..ece2d2ef 100644 --- a/docs/reference/alkIndivAge.html +++ b/docs/reference/alkIndivAge.html @@ -1,5 +1,5 @@ -Use an age-length key to assign age to individuals in the unaged sample. — alkIndivAge • FSAUse an age-length key to assign age to individuals in the unaged sample. — alkIndivAge • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    @@ -384,11 +384,11 @@

    Examples -

    Developed by Derek Ogle, Jason Doll, Powell Wheeler, Alexis Dinno.

    +

    Developed by Derek H. Ogle, Jason C. Doll, A. Powell Wheeler, Alexis Dinno.

    diff --git a/docs/reference/alkMeanVar.html b/docs/reference/alkMeanVar.html index fc9a0e72..a27ab88f 100644 --- a/docs/reference/alkMeanVar.html +++ b/docs/reference/alkMeanVar.html @@ -1,5 +1,5 @@ -Mean Values-at-age from an age-length key — alkMeanVar • FSAMean Values-at-age from an age-length key — alkMeanVar • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/alkPlot.html b/docs/reference/alkPlot.html index 39e349da..6f3287e6 100644 --- a/docs/reference/alkPlot.html +++ b/docs/reference/alkPlot.html @@ -1,5 +1,5 @@ -Plots to visualize age-length keys. — alkPlot • FSAPlots to visualize age-length keys. — alkPlot • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/binCI.html b/docs/reference/binCI.html index 299f723f..60d50e66 100644 --- a/docs/reference/binCI.html +++ b/docs/reference/binCI.html @@ -1,5 +1,5 @@ -Confidence intervals for binomial probability of success. — binCI • FSAConfidence intervals for binomial probability of success. — binCI • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/capHistConvert.html b/docs/reference/capHistConvert.html index e5c0dcc7..adb4e81b 100644 --- a/docs/reference/capHistConvert.html +++ b/docs/reference/capHistConvert.html @@ -1,5 +1,5 @@ -Convert between capture history data.frame formats. — capHistConvert • FSAConvert between capture history data.frame formats. — capHistConvert • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/capHistSum.html b/docs/reference/capHistSum.html index 83fb5219..159e791a 100644 --- a/docs/reference/capHistSum.html +++ b/docs/reference/capHistSum.html @@ -1,5 +1,5 @@ -Summarize capture histories in individual fish format. — capHistSum • FSASummarize capture histories in individual fish format. — capHistSum • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/catchCurve.html b/docs/reference/catchCurve.html index 196230b5..5027cdd5 100644 --- a/docs/reference/catchCurve.html +++ b/docs/reference/catchCurve.html @@ -1,5 +1,5 @@ -Mortality estimates from the descending limb of a catch curve. — catchCurve • FSAMortality estimates from the descending limb of a catch curve. — catchCurve • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/chapmanRobson.html b/docs/reference/chapmanRobson.html index 6a409c07..763d3f8d 100644 --- a/docs/reference/chapmanRobson.html +++ b/docs/reference/chapmanRobson.html @@ -1,5 +1,5 @@ -Computes Chapman-Robson estimates of S and Z. — chapmanRobson • FSAComputes Chapman-Robson estimates of S and Z. — chapmanRobson • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4

    Testing

    Tested the results of chapmanRobson against the results in Miranda and Bettoli (2007). The point estimates of S matched perfectly but the SE of S did not because Miranda and Bettoli used a rounded estimate of S in the calculation of the SE of S but chapmanRobson does not.

    -

    Tested the results against the results from agesurv in fishmethods using the rockbass data.frame in fishmethods. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith et al. (2012)) results. FSA uses equation 2 from Smith et al. (2012) whereas fishmethods appears to use equation 5 from the same source to estimate the SE of Z.

    +

    Tested the results against the results from agesurv in fishmethods using the rockbass data.frame in fishmethods. Results for Z and the SE of Z matched perfectly for non-bias-corrected results. The estimate of Z, but not the SE of Z, matched for the bias-corrected (following Smith et al. (2012)) results. FSA uses equation 2 from Smith et al. (2012) whereas fishmethods appears to use equation 5 from the same source to estimate the SE of Z.

    IFAR Chapter

    @@ -296,11 +296,11 @@

    Examples -

    Developed by Derek Ogle, Jason Doll, Powell Wheeler, Alexis Dinno.

    +

    Developed by Derek H. Ogle, Jason C. Doll, A. Powell Wheeler, Alexis Dinno.

    diff --git a/docs/reference/col2rgbt.html b/docs/reference/col2rgbt.html index 6952f006..d0abf36e 100644 --- a/docs/reference/col2rgbt.html +++ b/docs/reference/col2rgbt.html @@ -1,5 +1,5 @@ -Converts an R color to RGB (red/green/blue) including a transparency (alpha channel). — col2rgbt • FSAConverts an R color to RGB (red/green/blue) including a transparency (alpha channel). — col2rgbt • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/depletion.html b/docs/reference/depletion.html index 81239598..24f4c3e2 100644 --- a/docs/reference/depletion.html +++ b/docs/reference/depletion.html @@ -1,5 +1,5 @@ -Computes the Leslie or DeLury population estimate from catch and effort data. — depletion • FSAComputes the Leslie or DeLury population estimate from catch and effort data. — depletion • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/dunnTest.html b/docs/reference/dunnTest.html index 7e274d79..f22a2f2e 100644 --- a/docs/reference/dunnTest.html +++ b/docs/reference/dunnTest.html @@ -1,5 +1,5 @@ -Dunn's Kruskal-Wallis Multiple Comparisons. — dunnTest • FSADunn's Kruskal-Wallis Multiple Comparisons. — dunnTest • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/expandCounts.html b/docs/reference/expandCounts.html index 17a98c78..491700fe 100644 --- a/docs/reference/expandCounts.html +++ b/docs/reference/expandCounts.html @@ -1,5 +1,5 @@ -Repeat individual fish data (including lengths) from tallied counts. — expandCounts • FSARepeat individual fish data (including lengths) from tallied counts. — expandCounts • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/expandLenFreq.html b/docs/reference/expandLenFreq.html index 3d4de942..ad93ca9f 100644 --- a/docs/reference/expandLenFreq.html +++ b/docs/reference/expandLenFreq.html @@ -1,5 +1,5 @@ -Expands a length frequency based on a subsample. — expandLenFreq • FSAExpands a length frequency based on a subsample. — expandLenFreq • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/extraTests.html b/docs/reference/extraTests.html index 748bc6c3..70268163 100644 --- a/docs/reference/extraTests.html +++ b/docs/reference/extraTests.html @@ -1,5 +1,5 @@ -Likelihood ratio and extra sum-of-squares tests. — extraTests • FSALikelihood ratio and extra sum-of-squares tests. — extraTests • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/fact2num.html b/docs/reference/fact2num.html index 624a579e..cab5953c 100644 --- a/docs/reference/fact2num.html +++ b/docs/reference/fact2num.html @@ -1,5 +1,5 @@ -Converts "numeric" factor levels to numeric values. — fact2num • FSAConverts "numeric" factor levels to numeric values. — fact2num • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/fishR.html b/docs/reference/fishR.html index f90e25e8..c4efe70b 100644 --- a/docs/reference/fishR.html +++ b/docs/reference/fishR.html @@ -1,5 +1,5 @@ -Opens web pages associated with the fishR website. — fishR • FSAOpens web pages associated with the fishR website. — fishR • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/geomean.html b/docs/reference/geomean.html index 01c7b2af..b141e373 100644 --- a/docs/reference/geomean.html +++ b/docs/reference/geomean.html @@ -1,5 +1,5 @@ -Calculates the geometric mean or geometric standard deviation. — geomean • FSACalculates the geometric mean or geometric standard deviation. — geomean • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
    diff --git a/docs/reference/growthModels.html b/docs/reference/growthModels.html index 51b6b9b7..9dd9f2fd 100644 --- a/docs/reference/growthModels.html +++ b/docs/reference/growthModels.html @@ -1,5 +1,5 @@ -Creates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. — growthModels • FSACreates a function for a specific parameterization of the von Bertalanffy, Gompertz, Richards, and logistic growth functions. — growthModels • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4
  • Richards

    • Within FSA, Linf is the mean asymptotic length, ti is the age at the inflection point, k controls the slope at the inflection point (maximum relative growth rate), b is dimensionless but related to the vertical position (i.e., size) of the inflection point, a is dimensionless but related to the horizontal position (i.e., age) of the inflection point, and L0 is the mean length at age-0.

    • @@ -199,263 +199,205 @@

      Examples
      ###########################################################
       ## Simple Examples -- Von B
       ( vb1 <- vbFuns() )
      -#> function (t, Linf, K = NULL, t0 = NULL) 
      -#> {
      -#>     if (length(Linf) == 3) {
      -#>         K <- Linf[[2]]
      -#>         t0 <- Linf[[3]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf * (1 - exp(-K * (t - t0)))
      -#> }
      -#> <bytecode: 0x000002105fba4d48>
      -#> <environment: 0x000002105fbcbe48>
      +#> function(t,Linf,K=NULL,t0=NULL) {
      +#>   if (length(Linf)==3) { K <- Linf[[2]]
      +#>                          t0 <- Linf[[3]]
      +#>                          Linf <- Linf[[1]] }
      +#>   Linf*(1-exp(-K*(t-t0)))
      +#>   }
      +#> <bytecode: 0x00000237efcca370>
      +#> <environment: 0x00000237efbd29a0>
       ages <- 0:20
       plot(vb1(ages,Linf=20,K=0.3,t0=-0.2)~ages,type="b",pch=19)
       
       ( vb2 <- vbFuns("Francis") )
      -#> function (t, L1, L2 = NULL, L3 = NULL, t1, t3 = NULL) 
      -#> {
      -#>     if (length(L1) == 3) {
      -#>         L2 <- L1[[2]]
      -#>         L3 <- L1[[3]]
      -#>         L1 <- L1[[1]]
      -#>     }
      -#>     if (length(t1) == 2) {
      -#>         t3 <- t1[[2]]
      -#>         t1 <- t1[[1]]
      -#>     }
      -#>     r <- (L3 - L2)/(L2 - L1)
      -#>     L1 + (L3 - L1) * ((1 - r^(2 * ((t - t1)/(t3 - t1))))/(1 - 
      -#>         r^2))
      -#> }
      -#> <bytecode: 0x000002105fbad888>
      -#> <environment: 0x000002105fd31f38>
      +#> function(t,L1,L2=NULL,L3=NULL,t1,t3=NULL) {
      +#>   if (length(L1)==3) { L2 <- L1[[2]]; L3 <- L1[[3]]; L1 <- L1[[1]] }
      +#>   if (length(t1)==2) { t3 <- t1[[2]]; t1 <- t1[[1]] }
      +#>   r <- (L3-L2)/(L2-L1)
      +#>   L1+(L3-L1)*((1-r^(2*((t-t1)/(t3-t1))))/(1-r^2))
      +#>   }
      +#> <bytecode: 0x00000237efc83db0>
      +#> <environment: 0x00000237ef9e3f38>
       plot(vb2(ages,L1=10,L2=19,L3=20,t1=2,t3=18)~ages,type="b",pch=19)
       
       ( vb2c <- vbFuns("Francis",simple=TRUE) )   # compare to vb2
      -#> function (t, L1, L2, L3, t1, t3) 
      -#> {
      -#>     r <- (L3 - L2)/(L2 - L1)
      -#>     L1 + (L3 - L1) * ((1 - r^(2 * ((t - t1)/(t3 - t1))))/(1 - 
      -#>         r^2))
      -#> }
      -#> <bytecode: 0x000002105fbb9a00>
      -#> <environment: 0x000002105fe28d38>
      +#> function(t,L1,L2,L3,t1,t3) {
      +#>     r <- (L3-L2)/(L2-L1)
      +#>     L1+(L3-L1)*((1-r^(2*((t-t1)/(t3-t1))))/(1-r^2))
      +#>   }
      +#> <bytecode: 0x00000237efc7abd0>
      +#> <environment: 0x00000237ef8cd408>
       
       ## Simple Examples -- Gompertz
       ( gomp1 <- GompertzFuns() )
      -#> function (t, Linf, gi = NULL, ti = NULL) 
      -#> {
      -#>     if (length(Linf) == 3) {
      -#>         gi <- Linf[[2]]
      -#>         ti <- Linf[[3]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf * exp(-exp(-gi * (t - ti)))
      -#> }
      -#> <bytecode: 0x000002105fe99ad0>
      -#> <environment: 0x000002105febb3c8>
      +#> function(t,Linf,gi=NULL,ti=NULL) {
      +#>     if (length(Linf)==3) { gi <- Linf[[2]]
      +#>     ti <- Linf[[3]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf*exp(-exp(-gi*(t-ti)))
      +#>   }
      +#> <bytecode: 0x00000237ef806420>
      +#> <environment: 0x00000237ef7cd368>
       plot(gomp1(ages,Linf=800,gi=0.5,ti=5)~ages,type="b",pch=19)
       
       ( gomp2 <- GompertzFuns("Ricker2") )
      -#> function (t, L0, a = NULL, gi = NULL) 
      -#> {
      -#>     if (length(L0) == 3) {
      -#>         a <- L0[[2]]
      -#>         gi <- L0[[3]]
      -#>         L0 <- L0[[1]]
      -#>     }
      -#>     L0 * exp(a * (1 - exp(-gi * t)))
      -#> }
      -#> <bytecode: 0x000002105fe987c8>
      -#> <environment: 0x000002105fff2bf0>
      +#> function(t,L0,a=NULL,gi=NULL) {
      +#>     if (length(L0)==3) { a <- L0[[2]]
      +#>     gi <- L0[[3]]
      +#>     L0 <- L0[[1]] }
      +#>     L0*exp(a*(1-exp(-gi*t)))
      +#>   }
      +#> <bytecode: 0x00000237ef7ff560>
      +#> <environment: 0x00000237ef56b5f0>
       plot(gomp2(ages,L0=2,a=6,gi=0.5)~ages,type="b",pch=19)
       
       ( gomp2c <- GompertzFuns("Ricker2",simple=TRUE) )   # compare to gomp2
      -#> function (t, L0, a, gi) 
      -#> {
      -#>     L0 * exp(a * (1 - exp(-gi * t)))
      -#> }
      -#> <bytecode: 0x000002105fea9a70>
      -#> <environment: 0x0000021060118300>
      +#> function(t,L0,a,gi) {
      +#>     L0*exp(a*(1-exp(-gi*t)))
      +#>   }
      +#> <bytecode: 0x00000237ef7f3ce0>
      +#> <environment: 0x00000237ef3306a0>
       ( gompT <- GompertzFuns("Troynikov1"))
      -#> function (Lm, dt, Linf, gi = NULL) 
      -#> {
      -#>     if (length(Linf) == 2) {
      -#>         gi <- Linf[2]
      -#>         Linf <- Linf[1]
      -#>     }
      -#>     Linf * ((Lm/Linf)^exp(-gi * dt)) - Lm
      -#> }
      -#> <bytecode: 0x000002105fe9aa40>
      -#> <environment: 0x00000210601954b8>
      +#> function(Lm,dt,Linf,gi=NULL) {
      +#>     if (length(Linf)==2) { gi <- Linf[2]
      +#>     Linf <- Linf[1] }
      +#>     Linf*((Lm/Linf)^exp(-gi*dt))-Lm
      +#>   }
      +#> <bytecode: 0x00000237ef7e0e90>
      +#> <environment: 0x00000237ef294790>
       
       ## Simple Examples -- Richards
       ( rich1 <- RichardsFuns() )
      -#> function (t, Linf, k = NULL, a = NULL, b = NULL) 
      -#> {
      -#>     if (length(Linf) == 4) {
      -#>         k <- Linf[[2]]
      -#>         a <- Linf[[3]]
      -#>         b <- Linf[[4]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf * (1 - a * exp(-k * t))^b
      -#> }
      -#> <bytecode: 0x00000210602287a0>
      -#> <environment: 0x00000210602828f8>
      +#> function(t,Linf,k=NULL,a=NULL,b=NULL) {
      +#>     if (length(Linf)==4) { k <- Linf[[2]]
      +#>     a <- Linf[[3]]
      +#>     b <- Linf[[4]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf*(1-a*exp(-k*t))^b
      +#>   }
      +#> <bytecode: 0x00000237ef1bca70>
      +#> <environment: 0x00000237ef16da08>
       plot(rich1(ages,Linf=800,k=0.5,a=1,b=6)~ages,type="b",pch=19)
       
       ( rich2 <- RichardsFuns(2) )
      -#> function (t, Linf, k = NULL, ti = NULL, b = NULL) 
      -#> {
      -#>     if (length(Linf) == 4) {
      -#>         k <- Linf[[2]]
      -#>         ti <- Linf[[3]]
      -#>         b <- Linf[[4]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf * (1 - (1/b) * exp(-k * (t - ti)))^b
      -#> }
      -#> <bytecode: 0x0000021060220d80>
      -#> <environment: 0x00000210607a7a60>
      +#> function(t,Linf,k=NULL,ti=NULL,b=NULL) {
      +#>     if (length(Linf)==4) { k <- Linf[[2]]
      +#>     ti <- Linf[[3]]
      +#>     b <- Linf[[4]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf*(1-(1/b)*exp(-k*(t-ti)))^b
      +#>   }
      +#> <bytecode: 0x00000237ef1b07a8>
      +#> <environment: 0x00000237eefc9e58>
       plot(rich2(ages,Linf=800,k=0.5,ti=3,b=6)~ages,type="b",pch=19)
       
       ( rich3 <- RichardsFuns(3) )
      -#> function (t, Linf, k = NULL, ti = NULL, b = NULL) 
      -#> {
      -#>     if (length(Linf) == 4) {
      -#>         k <- Linf[[2]]
      -#>         ti <- Linf[[3]]
      -#>         b <- Linf[[4]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf/((1 + b * exp(-k * (t - ti)))^(1/b))
      -#> }
      -#> <bytecode: 0x000002106022d278>
      -#> <environment: 0x0000021060932480>
      +#> function(t,Linf,k=NULL,ti=NULL,b=NULL) {
      +#>     if (length(Linf)==4) { k <- Linf[[2]]
      +#>     ti <- Linf[[3]]
      +#>     b <- Linf[[4]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf/((1+b*exp(-k*(t-ti)))^(1/b))
      +#>   }
      +#> <bytecode: 0x00000237ef1aa938>
      +#> <environment: 0x00000237eeea2408>
       plot(rich3(ages,Linf=800,k=0.5,ti=3,b=0.15)~ages,type="b",pch=19)
       
       ( rich4 <- RichardsFuns(4) )
      -#> function (t, Linf, k = NULL, ti = NULL, b = NULL) 
      -#> {
      -#>     if (length(Linf) == 4) {
      -#>         k <- Linf[[2]]
      -#>         ti <- Linf[[3]]
      -#>         b <- Linf[[4]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf * (1 + (b - 1) * exp(-k * (t - ti)))^(1/(1 - b))
      -#> }
      -#> <bytecode: 0x000002106021e088>
      -#> <environment: 0x0000021060a2b550>
      +#> function(t,Linf,k=NULL,ti=NULL,b=NULL) {
      +#>     if (length(Linf)==4) { k <- Linf[[2]]
      +#>     ti <- Linf[[3]]
      +#>     b <- Linf[[4]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
      +#>   }
      +#> <bytecode: 0x00000237ef1a1288>
      +#> <environment: 0x00000237eed8fbb8>
       plot(rich4(ages,Linf=800,k=0.5,ti=3,b=0.95)~ages,type="b",pch=19)
       lines(rich4(ages,Linf=800,k=0.5,ti=3,b=1.5)~ages,type="b",pch=19,col="blue")
       
       ( rich5 <- RichardsFuns(5) )
      -#> function (t, Linf, k = NULL, L0 = NULL, b = NULL) 
      -#> {
      -#>     if (length(Linf) == 4) {
      -#>         k <- Linf[[2]]
      -#>         L0 <- Linf[[3]]
      -#>         b <- Linf[[4]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf * (1 + (((L0/Linf)^(1 - b)) - 1) * exp(-k * t))^(1/(1 - 
      -#>         b))
      -#> }
      -#> <bytecode: 0x0000021060239c78>
      -#> <environment: 0x0000021060bb3f78>
      +#> function(t,Linf,k=NULL,L0=NULL,b=NULL) {
      +#>     if (length(Linf)==4) { k <- Linf[[2]]
      +#>     L0 <- Linf[[3]]
      +#>     b <- Linf[[4]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf*(1+(((L0/Linf)^(1-b))-1)*exp(-k*t))^(1/(1-b))
      +#>   }
      +#> <bytecode: 0x00000237ef195b80>
      +#> <environment: 0x00000237eebe29d0>
       plot(rich5(ages,Linf=800,k=0.5,L0=50,b=1.5)~ages,type="b",pch=19)
       
       ( rich6 <- RichardsFuns(6) )
      -#> function (t, Linf, k = NULL, ti = NULL, Lninf = NULL, b = NULL) 
      -#> {
      -#>     if (length(Linf) == 5) {
      -#>         k <- Linf[[2]]
      -#>         ti <- Linf[[3]]
      -#>         Lninf <- Linf[[3]]
      -#>         b <- Linf[[4]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Lninf + (Linf - Lninf) * (1 + (b - 1) * exp(-k * (t - ti)))^(1/(1 - 
      -#>         b))
      -#> }
      -#> <bytecode: 0x00000210602562f0>
      -#> <environment: 0x0000021060cb1278>
      +#> function(t,Linf,k=NULL,ti=NULL,Lninf=NULL,b=NULL) {
      +#>     if (length(Linf)==5) { k <- Linf[[2]]
      +#>     ti <- Linf[[3]]
      +#>     Lninf <- Linf[[3]]
      +#>     b <- Linf[[4]]
      +#>     Linf <- Linf[[1]] }
      +#>     Lninf+(Linf-Lninf)*(1+(b-1)*exp(-k*(t-ti)))^(1/(1-b))
      +#>   }
      +#> <bytecode: 0x00000237ef170720>
      +#> <environment: 0x00000237eeaaa850>
       plot(rich6(ages,Linf=800,k=0.5,ti=3,Lninf=50,b=1.5)~ages,type="b",pch=19)
       
       ( rich2c <- RichardsFuns(2,simple=TRUE) ) # compare to rich2
      -#> function (t, Linf, k, ti, b) 
      -#> {
      -#>     Linf * (1 - (1/b) * exp(-k * (t - ti)))^b
      -#> }
      -#> <bytecode: 0x000002106022dba8>
      -#> <environment: 0x0000021060de43a0>
      +#> function(t,Linf,k,ti,b) {
      +#>     Linf*(1-(1/b)*exp(-k*(t-ti)))^b
      +#>   }
      +#> <bytecode: 0x00000237ef19dc08>
      +#> <environment: 0x00000237ee9845e0>
       
       ## Simple Examples -- Logistic
       ( log1 <- logisticFuns() )
      -#> function (t, Linf, gninf = NULL, ti = NULL) 
      -#> {
      -#>     if (length(Linf) == 3) {
      -#>         gninf <- Linf[[2]]
      -#>         ti <- Linf[[3]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf/(1 + exp(-gninf * (t - ti)))
      -#> }
      -#> <bytecode: 0x0000021060e649a0>
      -#> <environment: 0x0000021060e6f0b8>
      +#> function(t,Linf,gninf=NULL,ti=NULL) {
      +#>     if (length(Linf)==3) { gninf <- Linf[[2]]
      +#>     ti <- Linf[[3]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf/(1+exp(-gninf*(t-ti)))
      +#>   }
      +#> <bytecode: 0x00000237ee901db8>
      +#> <environment: 0x00000237ee8ca0f0>
       plot(log1(ages,Linf=800,gninf=0.5,ti=5)~ages,type="b",pch=19)
       
       ( log2 <- logisticFuns("CJ2") )
      -#> function (t, Linf, gninf = NULL, a = NULL) 
      -#> {
      -#>     if (length(Linf) == 3) {
      -#>         gninf <- Linf[[2]]
      -#>         a <- Linf[[3]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     Linf/(1 + a * exp(-gninf * t))
      -#> }
      -#> <bytecode: 0x0000021060e61588>
      -#> <environment: 0x0000021060f701a8>
      +#> function(t,Linf,gninf=NULL,a=NULL) {
      +#>     if (length(Linf)==3) { gninf <- Linf[[2]]
      +#>     a <- Linf[[3]]
      +#>     Linf <- Linf[[1]] }
      +#>     Linf/(1+a*exp(-gninf*t))
      +#>   }
      +#> <bytecode: 0x00000237ee8dedc8>
      +#> <environment: 0x00000237ee7d9270>
       plot(log2(ages,Linf=800,gninf=0.5,a=10)~ages,type="b",pch=19)
       
       ( log2c <- logisticFuns("CJ2",simple=TRUE) ) # compare to log2
      -#> function (t, Linf, gninf, a) 
      -#> {
      -#>     Linf/(1 + a * exp(-gninf * t))
      -#> }
      -#> <bytecode: 0x0000021060e60910>
      -#> <environment: 0x00000210610cf738>
      +#> function(t,Linf,gninf,a) {
      +#>     Linf/(1+a*exp(-gninf*t))
      +#>   }
      +#> <bytecode: 0x00000237ee8eb4b8>
      +#> <environment: 0x00000237ee6d17b0>
       ( log3 <- logisticFuns("Karkach") )
      -#> function (t, Linf, L0 = NULL, gninf = NULL) 
      -#> {
      -#>     if (length(Linf) == 3) {
      -#>         L0 <- Linf[[2]]
      -#>         gninf <- Linf[[3]]
      -#>         Linf <- Linf[[1]]
      -#>     }
      -#>     L0 * Linf/(L0 + (Linf - L0) * exp(-gninf * t))
      -#> }
      -#> <bytecode: 0x0000021060e60360>
      -#> <environment: 0x00000210611991f0>
      +#> function(t,Linf,L0=NULL,gninf=NULL) {
      +#>     if (length(Linf)==3) { L0 <- Linf[[2]]
      +#>     gninf <- Linf[[3]]
      +#>     Linf <- Linf[[1]] }
      +#>     L0*Linf/(L0+(Linf-L0)*exp(-gninf*t))
      +#>   }
      +#> <bytecode: 0x00000237ee8dbcf8>
      +#> <environment: 0x00000237ee67f8d0>
       plot(log3(ages,L0=10,Linf=800,gninf=0.5)~ages,type="b",pch=19)
       
       ( log4 <- logisticFuns("Haddon") )
      -#> function (Lm, dLmax, L50 = NULL, L95 = NULL) 
      -#> {
      -#>     if (length(dLmax) == 3) {
      -#>         L50 <- dLmax[2]
      -#>         L95 <- dLmax[3]
      -#>         dLmax <- dLmax[1]
      -#>     }
      -#>     dLmax/(1 + exp(log(19) * ((Lm - L50)/(L95 - L50))))
      -#> }
      -#> <bytecode: 0x0000021060e72d20>
      -#> <environment: 0x000002106129ba48>
      +#> function(Lm,dLmax,L50=NULL,L95=NULL) {
      +#>     if (length(dLmax)==3) { L50 <- dLmax[2]
      +#>     L95 <- dLmax[3]
      +#>     dLmax <- dLmax[1] }
      +#>     dLmax/(1+exp(log(19)*((Lm-L50)/(L95-L50))))
      +#>   }
      +#> <bytecode: 0x00000237ee8e30f8>
      +#> <environment: 0x00000237ee585ba0>
       
       
       ###########################################################
      @@ -853,11 +795,11 @@ 

      Examples -

      Developed by Derek Ogle, Jason Doll, Powell Wheeler, Alexis Dinno.

      +

      Developed by Derek H. Ogle, Jason C. Doll, A. Powell Wheeler, Alexis Dinno.

  • diff --git a/docs/reference/headtail.html b/docs/reference/headtail.html index 401aa094..2f2b54a4 100644 --- a/docs/reference/headtail.html +++ b/docs/reference/headtail.html @@ -1,5 +1,5 @@ -Shows rows from the head and tail of a data frame or matrix. — headtail • FSAShows rows from the head and tail of a data frame or matrix. — headtail • FSA @@ -10,7 +10,7 @@ FSA - 0.9.3.9000 + 0.9.4