
Commit 3c7f41c

hotchemi authored and akarnokd committed
Remove tabs indent and redundant space. (#4343)
1 parent 9074410 commit 3c7f41c

1 file changed: +89 -89 lines changed


src/main/java/rx/Scheduler.java

Lines changed: 89 additions & 89 deletions
@@ -1,12 +1,12 @@
 /**
  * Copyright 2014 Netflix, Inc.
- *
+ *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- *
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -45,8 +45,8 @@ public abstract class Scheduler {
  *     maintenance.
  */

-    /**
-     * The tolerance for a clock drift in nanoseconds where the periodic scheduler will rebase.
+    /**
+     * The tolerance for a clock drift in nanoseconds where the periodic scheduler will rebase.
      * <p>
      * The associated system parameter, {@code rx.scheduler.drift-tolerance}, expects its value in minutes.
      */
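Note (illustrative, not part of this diff): the drift tolerance documented above is read from the {@code rx.scheduler.drift-tolerance} system property once, in this class's static initializer (visible as context in the next hunk), so a non-default value has to be set before rx.Scheduler is first loaded. A minimal sketch, with the class name and the 5-minute value chosen purely for illustration:

    // Sketch only: configure the drift tolerance before any Scheduler is touched.
    public final class DriftToleranceSetup {
        public static void main(String[] args) {
            // Value is in minutes; the static block falls back to 15 if unset.
            System.setProperty("rx.scheduler.drift-tolerance", "5");
            // ... only now create Schedulers / Observables ...
        }
    }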
@@ -55,14 +55,14 @@ public abstract class Scheduler {
         CLOCK_DRIFT_TOLERANCE_NANOS = TimeUnit.MINUTES.toNanos(
                 Long.getLong("rx.scheduler.drift-tolerance", 15));
     }
-
+
     /**
      * Retrieves or creates a new {@link Scheduler.Worker} that represents serial execution of actions.
      * <p>
      * When work is completed it should be unsubscribed using {@link Scheduler.Worker#unsubscribe()}.
      * <p>
      * Work on a {@link Scheduler.Worker} is guaranteed to be sequential.
-     *
+     *
      * @return a Worker representing a serial queue of actions to be executed
      */
     public abstract Worker createWorker();
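Note (illustrative, not part of this diff): the createWorker() contract above amounts to "obtain a Worker, schedule sequential work on it, unsubscribe when done". A minimal sketch assuming the rx.schedulers.Schedulers entry points from the same library; the class name and the printed message are made up:

    // Sketch only: schedule one Action0 on a computation Worker, then release it.
    import rx.Scheduler;
    import rx.functions.Action0;
    import rx.schedulers.Schedulers;

    public final class WorkerSketch {
        public static void main(String[] args) throws InterruptedException {
            Scheduler.Worker worker = Schedulers.computation().createWorker();
            worker.schedule(new Action0() {
                @Override
                public void call() {
                    System.out.println("work runs sequentially on this Worker");
                }
            });
            Thread.sleep(100);    // crude wait so the background thread gets to run
            worker.unsubscribe(); // release the Worker once the work is completed
        }
    }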
@@ -76,7 +76,7 @@ public abstract static class Worker implements Subscription {

        /**
         * Schedules an Action for execution.
-         *
+         *
         * @param action
         *            Action to schedule
         * @return a subscription to be able to unsubscribe the action (unschedule it if not executed)
@@ -107,7 +107,7 @@ public abstract static class Worker implements Subscription {
         * <p>
         * Note to implementors: non-positive {@code initialTime} and {@code period} should be regarded as
         * undelayed scheduling of the first and any subsequent executions.
-         *
+         *
         * @param action
         *            the Action to execute periodically
         * @param initialDelay
@@ -127,7 +127,7 @@ public Subscription schedulePeriodically(final Action0 action, long initialDelay

            final SequentialSubscription first = new SequentialSubscription();
            final SequentialSubscription mas = new SequentialSubscription(first);
-
+
            final Action0 recursiveAction = new Action0() {
                long count;
                long lastNowNanos = firstNowNanos;
@@ -137,9 +137,9 @@ public void call() {
                    action.call();

                    if (!mas.isUnsubscribed()) {
-
+
                        long nextTick;
-
+
                        long nowNanos = TimeUnit.MILLISECONDS.toNanos(now());
                        // If the clock moved in a direction quite a bit, rebase the repetition period
                        if (nowNanos + CLOCK_DRIFT_TOLERANCE_NANOS < lastNowNanos
@@ -154,7 +154,7 @@ public void call() {
                            nextTick = startInNanos + (++count * periodInNanos);
                        }
                        lastNowNanos = nowNanos;
-
+
                        long delay = nextTick - nowNanos;
                        mas.replace(schedule(this, delay, TimeUnit.NANOSECONDS));
                    }
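Note (illustrative, not part of this diff): the whitespace changes above sit inside the periodic rescheduling loop; from a caller's point of view only the public schedulePeriodically(Action0, long, long, TimeUnit) entry point matters. A minimal sketch with an arbitrary class name and arbitrary delay values:

    // Sketch only: tick once per second with no initial delay, then cancel.
    import java.util.concurrent.TimeUnit;
    import rx.Scheduler;
    import rx.Subscription;
    import rx.functions.Action0;
    import rx.schedulers.Schedulers;

    public final class PeriodicSketch {
        public static void main(String[] args) throws InterruptedException {
            Scheduler.Worker worker = Schedulers.io().createWorker();
            Subscription periodic = worker.schedulePeriodically(new Action0() {
                @Override
                public void call() {
                    System.out.println("tick at " + System.currentTimeMillis());
                }
            }, 0, 1, TimeUnit.SECONDS);

            Thread.sleep(3500);     // let a few periods elapse
            periodic.unsubscribe(); // stop the periodic schedule
            worker.unsubscribe();   // release the Worker itself
        }
    }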
@@ -183,82 +183,82 @@ public long now() {
         return System.currentTimeMillis();
     }

-    /**
-     * Allows the use of operators for controlling the timing around when
-     * actions scheduled on workers are actually done. This makes it possible to
-     * layer additional behavior on this {@link Scheduler}. The only parameter
-     * is a function that flattens an {@link Observable} of {@link Observable}
-     * of {@link Completable}s into just one {@link Completable}. There must be
-     * a chain of operators connecting the returned value to the source
-     * {@link Observable} otherwise any work scheduled on the returned
-     * {@link Scheduler} will not be executed.
-     * <p>
-     * When {@link Scheduler#createWorker()} is invoked a {@link Observable} of
-     * {@link Completable}s is onNext'd to the combinator to be flattened. If
-     * the inner {@link Observable} is not immediately subscribed to an calls to
-     * {@link Worker#schedule} are buffered. Once the {@link Observable} is
-     * subscribed to actions are then onNext'd as {@link Completable}s.
-     * <p>
-     * Finally the actions scheduled on the parent {@link Scheduler} when the
-     * inner most {@link Completable}s are subscribed to.
-     * <p>
-     * When the {@link Worker} is unsubscribed the {@link Completable} emits an
-     * onComplete and triggers any behavior in the flattening operator. The
-     * {@link Observable} and all {@link Completable}s give to the flattening
-     * function never onError.
-     * <p>
-     * Limit the amount concurrency two at a time without creating a new fix
-     * size thread pool:
-     *
-     * <pre>
-     * Scheduler limitSched = Schedulers.computation().when(workers -> {
-     *  // use merge max concurrent to limit the number of concurrent
-     *  // callbacks two at a time
-     *  return Completable.merge(Observable.merge(workers), 2);
-     * });
-     * </pre>
-     * <p>
-     * This is a slightly different way to limit the concurrency but it has some
-     * interesting benefits and drawbacks to the method above. It works by
-     * limited the number of concurrent {@link Worker}s rather than individual
-     * actions. Generally each {@link Observable} uses its own {@link Worker}.
-     * This means that this will essentially limit the number of concurrent
-     * subscribes. The danger comes from using operators like
-     * {@link Observable#zip(Observable, Observable, rx.functions.Func2)} where
-     * subscribing to the first {@link Observable} could deadlock the
-     * subscription to the second.
-     *
-     * <pre>
-     * Scheduler limitSched = Schedulers.computation().when(workers -> {
-     *  // use merge max concurrent to limit the number of concurrent
-     *  // Observables two at a time
-     *  return Completable.merge(Observable.merge(workers, 2));
-     * });
-     * </pre>
-     *
-     * Slowing down the rate to no more than than 1 a second. This suffers from
-     * the same problem as the one above I could find an {@link Observable}
-     * operator that limits the rate without dropping the values (aka leaky
-     * bucket algorithm).
-     *
-     * <pre>
-     * Scheduler slowSched = Schedulers.computation().when(workers -> {
-     *  // use concatenate to make each worker happen one at a time.
-     *  return Completable.concat(workers.map(actions -> {
-     *      // delay the starting of the next worker by 1 second.
-     *      return Completable.merge(actions.delaySubscription(1, TimeUnit.SECONDS));
-     *  }));
-     * });
-     * </pre>
-     *
-     * @param <S> a Scheduler and a Subscription
-     * @param combine the function that takes a two-level nested Observable sequence of a Completable and returns
-     *            the Completable that will be subscribed to and should trigger the execution of the scheduled Actions.
-     * @return the Scheduler with the customized execution behavior
-     */
+    /**
+     * Allows the use of operators for controlling the timing around when
+     * actions scheduled on workers are actually done. This makes it possible to
+     * layer additional behavior on this {@link Scheduler}. The only parameter
+     * is a function that flattens an {@link Observable} of {@link Observable}
+     * of {@link Completable}s into just one {@link Completable}. There must be
+     * a chain of operators connecting the returned value to the source
+     * {@link Observable} otherwise any work scheduled on the returned
+     * {@link Scheduler} will not be executed.
+     * <p>
+     * When {@link Scheduler#createWorker()} is invoked a {@link Observable} of
+     * {@link Completable}s is onNext'd to the combinator to be flattened. If
+     * the inner {@link Observable} is not immediately subscribed to an calls to
+     * {@link Worker#schedule} are buffered. Once the {@link Observable} is
+     * subscribed to actions are then onNext'd as {@link Completable}s.
+     * <p>
+     * Finally the actions scheduled on the parent {@link Scheduler} when the
+     * inner most {@link Completable}s are subscribed to.
+     * <p>
+     * When the {@link Worker} is unsubscribed the {@link Completable} emits an
+     * onComplete and triggers any behavior in the flattening operator. The
+     * {@link Observable} and all {@link Completable}s give to the flattening
+     * function never onError.
+     * <p>
+     * Limit the amount concurrency two at a time without creating a new fix
+     * size thread pool:
+     *
+     * <pre>
+     * Scheduler limitSched = Schedulers.computation().when(workers -> {
+     *  // use merge max concurrent to limit the number of concurrent
+     *  // callbacks two at a time
+     *  return Completable.merge(Observable.merge(workers), 2);
+     * });
+     * </pre>
+     * <p>
+     * This is a slightly different way to limit the concurrency but it has some
+     * interesting benefits and drawbacks to the method above. It works by
+     * limited the number of concurrent {@link Worker}s rather than individual
+     * actions. Generally each {@link Observable} uses its own {@link Worker}.
+     * This means that this will essentially limit the number of concurrent
+     * subscribes. The danger comes from using operators like
+     * {@link Observable#zip(Observable, Observable, rx.functions.Func2)} where
+     * subscribing to the first {@link Observable} could deadlock the
+     * subscription to the second.
+     *
+     * <pre>
+     * Scheduler limitSched = Schedulers.computation().when(workers -> {
+     *  // use merge max concurrent to limit the number of concurrent
+     *  // Observables two at a time
+     *  return Completable.merge(Observable.merge(workers, 2));
+     * });
+     * </pre>
+     *
+     * Slowing down the rate to no more than than 1 a second. This suffers from
+     * the same problem as the one above I could find an {@link Observable}
+     * operator that limits the rate without dropping the values (aka leaky
+     * bucket algorithm).
+     *
+     * <pre>
+     * Scheduler slowSched = Schedulers.computation().when(workers -> {
+     *  // use concatenate to make each worker happen one at a time.
+     *  return Completable.concat(workers.map(actions -> {
+     *      // delay the starting of the next worker by 1 second.
+     *      return Completable.merge(actions.delaySubscription(1, TimeUnit.SECONDS));
+     *  }));
+     * });
+     * </pre>
+     *
+     * @param <S> a Scheduler and a Subscription
+     * @param combine the function that takes a two-level nested Observable sequence of a Completable and returns
+     *            the Completable that will be subscribed to and should trigger the execution of the scheduled Actions.
+     * @return the Scheduler with the customized execution behavior
+     */
     @SuppressWarnings("unchecked")
     @Experimental
-    public <S extends Scheduler & Subscription> S when(Func1<Observable<Observable<Completable>>, Completable> combine) {
-        return (S) new SchedulerWhen(combine, this);
-    }
+    public <S extends Scheduler & Subscription> S when(Func1<Observable<Observable<Completable>>, Completable> combine) {
+        return (S) new SchedulerWhen(combine, this);
+    }
 }
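Note (illustrative, not part of this diff): the first when() example in the Javadoc above can be expanded into a self-contained sketch; the class name, the range source and the blocking consumption are additions for illustration only, and assume Java 8 lambdas against the Func1 parameter:

    // Sketch only: limit the computation scheduler to two concurrent Workers,
    // following the Javadoc's first example, and drive some work through it.
    import rx.Completable;
    import rx.Observable;
    import rx.Scheduler;
    import rx.schedulers.Schedulers;

    public final class LimitedSchedulerSketch {
        public static void main(String[] args) {
            Scheduler limitSched = Schedulers.computation().when(workers ->
                    // merge with maxConcurrency = 2: at most two Workers active at once
                    Completable.merge(Observable.merge(workers), 2));

            Observable.range(1, 10)
                    .subscribeOn(limitSched)
                    .toBlocking()
                    .forEach(System.out::println);
        }
    }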
