 from .base_client import BaseHumanloop, AsyncBaseHumanloop
 from .environment import HumanloopEnvironment
 from .eval_utils import _run_eval, Dataset, File, Evaluator, EvaluatorCheck
-from .base_client import EvaluationsClient
+from .prompts.client import PromptsClient
+from .evaluations.client import EvaluationsClient
+from .prompt_utils import populate_template

-class ExtendedEvalsClient(EvaluationsClient):

+class ExtendedEvalsClient(EvaluationsClient):
     client: BaseHumanloop

     def run(
@@ -21,8 +23,8 @@ def run(
         # logs: typing.Sequence[dict] | None = None,
         workers: int = 4,
     ) -> List[EvaluatorCheck]:
-        """
-        Evaluate your function for a given `Dataset` and set of `Evaluators`.
+        """Evaluate your function for a given `Dataset` and set of `Evaluators`.
+
         :param file: the Humanloop file being evaluated, including a function to run over the dataset.
         :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File.
         :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation.
@@ -43,6 +45,10 @@ def run(
         )


+class ExtendedPromptsClient(PromptsClient):
+    populate_template = staticmethod(populate_template)
+
+
 class Humanloop(BaseHumanloop):
     """
     See docstring of BaseHumanloop.
@@ -75,6 +81,7 @@ def __init__(
         eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
         eval_client.client = self
         self.evaluations = eval_client
+        self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)


 class AsyncHumanloop(AsyncBaseHumanloop):
@@ -84,4 +91,4 @@ class AsyncHumanloop(AsyncBaseHumanloop):
     TODO: Add custom evaluation utilities for async case.
     """

-    pass
+    pass
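
For context, a minimal sketch of how the extended evaluations client might be called once this lands. The keyword arguments mirror what is visible in the `run` signature and docstring above (`file`, `name`, `dataset`, `workers`); the `evaluators` argument and the dict shapes of `File`, `Dataset`, and `Evaluator` are inferred from the imports, not confirmed by this diff.

```python
from humanloop import Humanloop

hl = Humanloop(api_key="YOUR_API_KEY")

def answer(question: str) -> str:
    # Stand-in for the function being evaluated over the dataset.
    return f"Echo: {question}"

# Hypothetical call: argument shapes are assumptions inferred from the
# docstring in the hunk above, not confirmed API.
checks = hl.evaluations.run(
    file={"path": "demo/answerer", "callable": lambda inputs: answer(**inputs)},
    name="smoke-test",
    dataset={"path": "demo/questions"},
    evaluators=[{"path": "demo/exact-match"}],
    workers=4,
)
for check in checks:
    print(check)  # each entry is an EvaluatorCheck
```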
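Similarly, a hedged sketch of the new prompts helper exposed via `ExtendedPromptsClient`. It assumes `populate_template(template, inputs)` substitutes `{{variable}}` placeholders from an inputs dict; neither the signature nor the template syntax is shown in this diff.

```python
from humanloop import Humanloop

hl = Humanloop(api_key="YOUR_API_KEY")

# Hypothetical usage: the {{variable}} substitution syntax is assumed.
template = "Answer the question concisely: {{question}}"
print(hl.prompts.populate_template(template, {"question": "What is an Evaluator?"}))
```

Exposing `populate_template` as a `staticmethod` attribute keeps the module-level helper importable while also making it discoverable as `client.prompts.populate_template`.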